ngram
listlengths
0
67.8k
[ "pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow)", "Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir,", "\"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples", "@pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\",", "= [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow),", "Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def 
test_main_shell(fname,", "is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/')", "else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def", "pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/',", "@pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def", "for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim", "@pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def", "from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f, as_version=4) if", "inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') 
@pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\",", "import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\",", "subprocess from shutil import copytree import inject import pytest from ef.config.config import Config", "in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def", "test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\",", "def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir, fname)", "@pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): 
run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'),", "universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line == '' or line.startswith(\"WARNING:\") assert result.stdout", "copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is not", "(\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf =", "\"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\",", "tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir):", "tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir):", "_examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), 
(\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\",", "_pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir,", "nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f, as_version=4)", "os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for", "tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def", "monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in", "stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line == '' or line.startswith(\"WARNING:\") assert", "fname = os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as", "as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if", "run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install", "\"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def 
test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic", "True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples", "import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f,", "test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)],", "\"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish", "()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow),", "with open(fname) as f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep", 
"Config from ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow),", "tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..')", "os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line ==", "test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True)", "[pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir,", "ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is not None else {}) inject.clear()", "run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir =", "@pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\",", "inject import pytest from ef.config.config import Config from ef.runner import Runner from ef.util.testing", "tmpdir, monkeypatch): basedir = 
os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True,", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line == '' or line.startswith(\"WARNING:\")", "pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m)", "run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')),", "subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line", "for line in result.stderr.split(\"\\n\"): assert line == '' or line.startswith(\"WARNING:\") assert result.stdout !=", "test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples", "Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname", "@pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): 
run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\",", "nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}}", "@pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir)", "run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform", "ep.preprocess(nb, {'metadata': {'path': path}} if path is not None else {}) inject.clear() @pytest.mark.slowish", "path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import nbformat from", "(\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf]", "def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'),", "def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow 
@pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle", "pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf", "not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow", "pytest from ef.config.config import Config from ef.runner import Runner from ef.util.testing import assert_dataclass_eq", "copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import nbformat from nbconvert.preprocessors", "@pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\",", "run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples", "= subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert", "dir = dir.replace('/', os.path.sep) fname = 
os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import", "\"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def", "Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\",", "test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True)", "()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish),", "test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def 
test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow", "\"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf')))", "Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir,", "fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb =", "backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir =", "Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf)", "os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch,", "def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): 
run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True)", "{}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir):", "def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'),", "tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False):", "sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/',", "if path is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\",", "run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples", "def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'),", "@pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): 
run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\",", "ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path)", "@pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\",", "'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\",", "None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples", "@pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\",", "tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir", "= os.path.join(os.path.dirname(__file__), '..') 
monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)", "@pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\",", "= Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep)", "f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb,", "run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def", "def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True)", "test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir): run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow", "in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def 
test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir)", "@pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\",", "(\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\",", "\"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__),", "def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'),", "{'path': path}} if path is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def", "if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path", "_pytest_params_example_conf) def test_main_shell(fname, 
tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef',", "pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f,", "m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make()", "check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line == '' or", "= [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname,", "= dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor", "run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir)", "tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", 
tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples", "import copytree import inject import pytest from ef.config.config import Config from ef.runner import", "@pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single", "= nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path':", "Runner from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish),", "run_jupyter(\"examples/drift_tube_potential\", \"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow", "\"potential.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples", "tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) 
@pytest.mark.slowish", "copytree import inject import pytest from ef.config.config import Config from ef.runner import Runner", "import ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir,", "run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import", "[(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\",", "def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow", "ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()),", "from shutil import copytree import inject import pytest from ef.config.config import Config from", "basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "import pytest from ef.config.config import Config from ef.runner import Runner from ef.util.testing import", 
"path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is not None", "tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\",", "import subprocess from shutil import copytree import inject import pytest from ef.config.config import", "os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb", "ef.config.config import Config from ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf =", "fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import nbformat", "nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f: nb = nbformat.read(f, as_version=4) if copy_dir:", "os import subprocess from shutil import copytree import inject import pytest from ef.config.config", "monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir", "path is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize(): run_jupyter(\"examples/jupyter\", \"visualize_examples.ipynb\",", "True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir)", "path}} if path is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples def test_all_examples_visualize():", "True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) 
@pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir):", "@pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir):", "\"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def", "Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch):", "(\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m", "test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None,", "True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", 
\"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def", "pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in", "def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir,", "from ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\",", "(\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep),", "marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver):", "(\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\",", "import inject import pytest from ef.config.config import Config from ef.runner import Runner from", "result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"):", "True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir)", "from ef.config.config import Config from ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf", "f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim =", "test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True)", "'..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line", "fname)], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) for line in result.stderr.split(\"\\n\"): assert line == ''", "<filename>tests/test_examples.py import os import subprocess from shutil import copytree import inject import pytest", "(\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ]", "import Runner from ef.util.testing import assert_dataclass_eq _examples_conf = 
[(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\",", "@pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\",", "{'metadata': {'path': path}} if path is not None else {}) inject.clear() @pytest.mark.slowish @pytest.mark.jupyter_examples", "\"visualize_examples.ipynb\", 'examples/jupyter/') @pytest.mark.slow @pytest.mark.jupyter_examples def test_axially_symmetric_beam_contour(tmpdir): run_jupyter(\"examples/axially_symmetric_beam_contour\", \"axially_symmetric_beam_contour.ipynb\", tmpdir) @pytest.mark.slow @pytest.mark.jupyter_examples def test_drift_tube_potential(tmpdir):", "_pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def", "(\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/tube_source_test/contour.conf\", pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for", "@pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\",", "= ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is not None 
else {})", "test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in Uniform Magnetic Field.ipynb\", tmpdir) run_jupyter(\"examples/single_particle_in_magnetic_field\", \"single_particle_in_magnetic_field.ipynb\", tmpdir.join('newdir'), True)", "shutil import copytree import inject import pytest from ef.config.config import Config from ef.runner", "ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is not None else", "assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir):", "monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result = subprocess.run(['ef', os.path.join(basedir, fname)], check=True, stdout=subprocess.PIPE,", "monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname, path=None, copy_dir=False): dir = dir.replace('/', os.path.sep) fname =", "import Config from ef.runner import Runner from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\",", "def test_single_particle_in_free_space(tmpdir): run_jupyter(\"examples/single_particle_in_free_space\", \"single_particle_in_free_space.ipynb\", tmpdir.join('newdir'), True) assert_dataclass_eq(Config.from_fname(tmpdir.join('newdir').join('config.ini')), Config.from_fname(tmpdir.join('newdir').join('single_particle_in_free_space.conf'))) @pytest.mark.slowish @pytest.mark.jupyter_examples def 
test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\",", "pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow),", "= os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname) as f:", "test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_particle_in_magnetic_field(tmpdir): run_jupyter(\"examples/single_particle_in_magnetic_field\", \"Single Particle in", "open(fname) as f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep =", "import os import subprocess from shutil import copytree import inject import pytest from", "copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata': {'path': path}} if path is", "nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600) ep.preprocess(nb, {'metadata':", "run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slowish", "@pytest.mark.requires_install @pytest.mark.parametrize(\"fname\", 
_pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result", "dir.replace('/', os.path.sep) fname = os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with", "(\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\",", "] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf)", "assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow),", "as f: nb = nbformat.read(f, as_version=4) if copy_dir: copytree(dir, path) ep = ExecutePreprocessor(timeout=600)", "@pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir): 
run_jupyter(\"examples/tube_source_test\", \"plot.ipynb\",", "True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_tube_source(tmpdir):", "from ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\",", "@pytest.mark.jupyter_examples def test_single_particle_in_uniform_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_electric_field\", \"single_particle_in_uniform_electric_field.ipynb\", tmpdir) @pytest.mark.slowish @pytest.mark.jupyter_examples def test_single_particle_in_radial_electric_field(tmpdir): run_jupyter(\"examples/single_particle_in_radial_electric_field\", \"plot.ipynb\", tmpdir.join('newdir'),", "@pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_main_shell(fname, tmpdir, monkeypatch): basedir = os.path.join(os.path.dirname(__file__), '..') monkeypatch.chdir(tmpdir) result =", "(\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour.conf\",", "line in 
result.stderr.split(\"\\n\"): assert line == '' or line.startswith(\"WARNING:\") assert result.stdout != \"\"", "pytest.mark.slow) ] _pytest_params_example_conf = [pytest.param(f.replace('/', os.path.sep), marks=m) for f, m in _examples_conf] @pytest.mark.parametrize(\"fname\",", "_examples_conf] @pytest.mark.parametrize(\"fname\", _pytest_params_example_conf) def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start()", "pytest.mark.slowish), (\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ()), (\"examples/single_particle_in_magnetic_field/single_particle_in_magnetic_field.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/large_time_step.conf\", pytest.mark.slow), (\"examples/single_particle_in_magnetic_field/long_simulation_time.conf\", pytest.mark.slow), (\"examples/ribbon_beam_contour/contour_bin.conf\", pytest.mark.slowish), (\"examples/drift_tube_potential/pot.conf\", pytest.mark.slow),", "os.path.sep) fname = os.path.join(dir, fname) import nbformat from nbconvert.preprocessors import ExecutePreprocessor with open(fname)", "tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def test_ribbon_beam_contour(tmpdir): run_jupyter(\"examples/ribbon_beam_contour\", \"beam.ipynb\", tmpdir.join('newdir'), True) @pytest.mark.slow @pytest.mark.jupyter_examples def", "def test_example_conf(fname, tmpdir, monkeypatch, backend_and_solver): sim = Config.from_fname(fname).make() monkeypatch.chdir(tmpdir) Runner(sim).start() def run_jupyter(dir, fname,", "ef.util.testing import assert_dataclass_eq _examples_conf = [(\"examples/axially_symmetric_beam_contour/contour.conf\", pytest.mark.slow), (\"examples/minimal_working_example/minimal_conf.conf\", ()), (\"examples/single_particle_in_free_space/single_particle_in_free_space.conf\", pytest.mark.slowish), 
(\"examples/single_particle_in_radial_electric_field/single_particle_in_radial_electric_field.conf\", ())," ]
[ "# License: Apache-2.0 from gators.encoders import WOEEncoder import pytest def test_init(): with pytest.raises(TypeError):", "License: Apache-2.0 from gators.encoders import WOEEncoder import pytest def test_init(): with pytest.raises(TypeError): WOEEncoder(dtype=str)" ]
[ "if not real_name and not email_address: continue if not validate_email(email_address): continue print(formataddr((real_name, email_address)))", "validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not", "= re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name,", "range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and not email_address: continue if", "EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate):", "def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if", "re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address", ") def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input())", "parseaddr(input()) if not real_name and not email_address: continue if not validate_email(email_address): continue print(formataddr((real_name,", "re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess))", "from email.utils import formataddr, parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile(", "i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and not email_address:", "int(input()) EMAIL_RE = re.compile( 
r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in", "formataddr, parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def", "= int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i", "in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and not email_address: continue", "import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return", "parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess):", "r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address =", "bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and", "emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' ) def validate_email(email_guess): return bool(EMAIL_RE.match(email_guess)) for", "= parseaddr(input()) if not real_name and not email_address: continue if not validate_email(email_address): continue", "real_name, email_address = parseaddr(input()) if not real_name and not email_address: continue if not", "email_address = parseaddr(input()) if not real_name and not email_address: continue if not validate_email(email_address):", "email.utils import formataddr, parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$'", "import 
formataddr, parseaddr import re emails_to_validate = int(input()) EMAIL_RE = re.compile( r'^[a-zA-Z][a-zA-Z0-9._-]*@[a-zA-Z]+\\.[a-zA-Z]{1,3}$' )", "return bool(EMAIL_RE.match(email_guess)) for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name", "for i in range(emails_to_validate): real_name, email_address = parseaddr(input()) if not real_name and not" ]
[ "= '<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade(): ###", "- please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ###", "ID: 78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used", "downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts')", "sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please", "auto generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'),", "Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title') ### end Alembic commands", "auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title') ###", "generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts',", "sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands ### def", "['title'], unique=False) ### end Alembic commands ### def downgrade(): ### commands auto generated", "Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False)", "used by Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic import op", "Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy", "by Alembic - please adjust! 
### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title') ### end Alembic", "= '78ac40739c16' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa", "end Alembic commands ### def downgrade(): ### commands auto generated by Alembic -", "Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by Alembic.", "def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'),", "as sa def upgrade(): ### commands auto generated by Alembic - please adjust!", "nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands ### def downgrade(): ###", "Revision ID: 78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers,", "<reponame>Renzf2015/myblog \"\"\"model-post-title add Revision ID: 78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\"", "by Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic import op import", "### def downgrade(): ### commands auto generated by Alembic - please adjust! ###", "import op import sqlalchemy as sa def upgrade(): ### commands auto generated by", "### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands", "\"\"\" # revision identifiers, used by Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>'", "op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands ### def downgrade(): ### commands", "17:54:40.327821 \"\"\" # revision identifiers, used by Alembic. revision = '78ac40739c16' down_revision =", "\"\"\"model-post-title add Revision ID: 78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" #", "please adjust! 
### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end", "generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title') ### end", "### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts',", "78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by", "<PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by Alembic. revision", "# revision identifiers, used by Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>' from", "down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade():", "Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please", "adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic", "unique=False) ### end Alembic commands ### def downgrade(): ### commands auto generated by", "Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by Alembic. revision = '78ac40739c16'", "### commands auto generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64),", "sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands ### def downgrade():", "add Revision ID: 78ac40739c16 Revises: <PASSWORD> Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision", "revision identifiers, used by Alembic. revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic", "identifiers, used by Alembic. 
revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic import", "revision = '78ac40739c16' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as", "'posts', ['title'], unique=False) ### end Alembic commands ### def downgrade(): ### commands auto", "sa def upgrade(): ### commands auto generated by Alembic - please adjust! ###", "commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title')", "commands ### def downgrade(): ### commands auto generated by Alembic - please adjust!", "commands auto generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True))", "'<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade(): ### commands", "- please adjust! ### op.drop_index(op.f('ix_posts_title'), table_name='posts') op.drop_column('posts', 'title') ### end Alembic commands ###", "alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated", "### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic", "def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('posts',", "2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by Alembic. revision = '78ac40739c16' down_revision", "'78ac40739c16' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa def", "op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic", "upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('posts', sa.Column('title',", "import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic -", "from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto", "by Alembic - please adjust! 
### op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'],", "Create Date: 2016-09-05 17:54:40.327821 \"\"\" # revision identifiers, used by Alembic. revision =", "op.add_column('posts', sa.Column('title', sa.String(length=64), nullable=True)) op.create_index(op.f('ix_posts_title'), 'posts', ['title'], unique=False) ### end Alembic commands ###" ]
[ "<filename>item_59/waste_memory.py import os import hashlib class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y", "[] for _ in range(100): obj = MyObject() values.append(obj) return values def run():", "hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for _ in range(100): obj = MyObject()", "def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values = []", "values def run(): deep_values = [] for _ in range(100): deep_values.append(get_data()) return deep_values", "def get_data(): values = [] for _ in range(100): obj = MyObject() values.append(obj)", "= hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for _ in range(100): obj =", "self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for _ in range(100): obj", "return values def run(): deep_values = [] for _ in range(100): deep_values.append(get_data()) return", "self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for _", "in range(100): obj = MyObject() values.append(obj) return values def run(): deep_values = []", "range(100): obj = MyObject() values.append(obj) return values def run(): deep_values = [] for", "MyObject(object): def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values =", "import os import hashlib class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y =", "values.append(obj) return values def run(): deep_values = [] for _ in range(100): deep_values.append(get_data())", "hashlib class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data():", "for _ in range(100): obj = MyObject() values.append(obj) return values def run(): deep_values", "_ in range(100): obj = MyObject() values.append(obj) return values def run(): deep_values =", "os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): 
values = [] for _ in range(100):", "os import hashlib class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest()", "__init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for", "import hashlib class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def", "get_data(): values = [] for _ in range(100): obj = MyObject() values.append(obj) return", "= [] for _ in range(100): obj = MyObject() values.append(obj) return values def", "values = [] for _ in range(100): obj = MyObject() values.append(obj) return values", "MyObject() values.append(obj) return values def run(): deep_values = [] for _ in range(100):", "class MyObject(object): def __init__(self): self.x = os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values", "= MyObject() values.append(obj) return values def run(): deep_values = [] for _ in", "obj = MyObject() values.append(obj) return values def run(): deep_values = [] for _", "= os.urandom(100) self.y = hashlib.sha1(self.x).hexdigest() def get_data(): values = [] for _ in" ]
[ "The file path of the messages csv categories_filepath (string): The file path of", "data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide", "Returns: df (pandas dataframe): The combined messages and categories df \"\"\" messages =", "0, inplace=True) return df def save_data(df, database_filename): \"\"\"Saves the resulting data to a", "of the database to save the cleaned data '\\ 'to as the third", "categories and messages df Returns: df (pandas dataframe): Cleaned dataframe with split categories", "categories.applymap(lambda s: int(s[-1])) # add the categories back to the original df df.drop('categories',", "third argument. \\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db') if __name__ ==", "database_filename): \"\"\"Saves the resulting data to a sqlite db Args: df (pandas dataframe):", "data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename):", "index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath =", "to save the cleaned data '\\ 'to as the third argument. \\n\\nExample: python", "create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath,", "pd.concat([df, categories], axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True)", "df (pandas dataframe): Cleaned dataframe with split categories \"\"\" # expand the categories", "first and second argument respectively, as '\\ 'well as the filepath of the", "argument. 
\\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db') if __name__ == '__main__':", "'\\ 'well as the filepath of the database to save the cleaned data", "load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category data Args: messages_filepath (string): The", "df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath,", "from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category", "== 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'", "Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main():", "cleans up the categories column Args: df (pandas dataframe): combined categories and messages", "the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean", "(string): The file path of the categories cv Returns: df (pandas dataframe): The", "sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath)", "data saved to database!') else: print('Please provide the filepaths of the messages and", "as '\\ 'well as the filepath of the database to save the cleaned", "df (pandas dataframe): The cleaned dataframe database_filename (string): the file path to save", "(string): The file path of the messages csv categories_filepath (string): The file path", "to the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) #", "inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, 
database_filename): \"\"\"Saves the resulting data", "- drops duplicates - removes messages missing classes - cleans up the categories", "categories df \"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id')", "if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:]", "original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean up", "The combined messages and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath)", "data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...')", "MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df", "database_filename (string): the file path to save the db Returns: None \"\"\" engine", "value as an integer categories = categories.applymap(lambda s: int(s[-1])) # add the categories", "the messages and categories '\\ 'datasets as the first and second argument respectively,", "df = pd.concat([df, categories], axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True)", "save the cleaned data '\\ 'to as the third argument. 
\\n\\nExample: python process_data.py", "= pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data: - drops", "an integer categories = categories.applymap(lambda s: int(s[-1])) # add the categories back to", "df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename): \"\"\"Saves", "inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename): \"\"\"Saves the", "removes messages missing classes - cleans up the categories column Args: df (pandas", "messages csv categories_filepath (string): The file path of the categories cv Returns: df", "= df.categories.str.split(';', expand=True) row = categories[:1] # get the category names category_colnames =", "= category_colnames # get only the last value in each value as an", "categories[:1] # get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns", "names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only", "on='id') def clean_data(df): \"\"\"Cleans the data: - drops duplicates - removes messages missing", "df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename): \"\"\"Saves the resulting", "data to a sqlite db Args: df (pandas dataframe): The cleaned dataframe database_filename", "'\\ 'datasets as the first and second argument respectively, as '\\ 'well as", ".format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n", "'to as the third argument. 
\\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db')", "dataframe): combined categories and messages df Returns: df (pandas dataframe): Cleaned dataframe with", "expand the categories column categories = df.categories.str.split(';', expand=True) row = categories[:1] # get", "'\\ 'to as the third argument. \\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\", "axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean up the final data", "value in each value as an integer categories = categories.applymap(lambda s: int(s[-1])) #", "\"\"\"Saves the resulting data to a sqlite db Args: df (pandas dataframe): The", "(pandas dataframe): combined categories and messages df Returns: df (pandas dataframe): Cleaned dataframe", "and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories,", "the categories cv Returns: df (pandas dataframe): The combined messages and categories df", "the categories column categories = df.categories.str.split(';', expand=True) row = categories[:1] # get the", "df.categories.str.split(';', expand=True) row = categories[:1] # get the category names category_colnames = row.applymap(lambda", "dataframe database_filename (string): the file path to save the db Returns: None \"\"\"", "pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and", "data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved", "import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the", "(pandas dataframe): The cleaned dataframe database_filename (string): the file path to save the", "resulting data to a sqlite db Args: df (pandas 
dataframe): The cleaned dataframe", "to database!') else: print('Please provide the filepaths of the messages and categories '\\", "messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans", "data '\\ 'to as the third argument. \\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv", "db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def", "messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath))", "duplicates - removes messages missing classes - cleans up the categories column Args:", "print('Please provide the filepaths of the messages and categories '\\ 'datasets as the", "CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df)", "and categories '\\ 'datasets as the first and second argument respectively, as '\\", "= pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the", "dataframe): The cleaned dataframe database_filename (string): the file path to save the db", "Args: df (pandas dataframe): The cleaned dataframe database_filename (string): the file path to", "print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please", "each value as an integer categories = categories.applymap(lambda s: int(s[-1])) # add the", "second argument respectively, as '\\ 'well as the filepath of the database to", "- removes messages missing classes - cleans up the categories column Args: df", 
"cleaned data '\\ 'to as the third argument. \\n\\nExample: python process_data.py '\\ 'disaster_messages.csv", "and messages df Returns: df (pandas dataframe): Cleaned dataframe with split categories \"\"\"", "specified message and category data Args: messages_filepath (string): The file path of the", "row = categories[:1] # get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0,", "Returns: df (pandas dataframe): Cleaned dataframe with split categories \"\"\" # expand the", "= categories[:1] # get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist()", "\"\"\" # expand the categories column categories = df.categories.str.split(';', expand=True) row = categories[:1]", "import sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath):", "\"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df):", "with split categories \"\"\" # expand the categories column categories = df.categories.str.split(';', expand=True)", "integer categories = categories.applymap(lambda s: int(s[-1])) # add the categories back to the", "s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the last value in each", "categories = categories.applymap(lambda s: int(s[-1])) # add the categories back to the original", "the categories back to the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df,", "categories '\\ 'datasets as the first and second argument respectively, as '\\ 'well", "file path of the messages csv categories_filepath (string): The file path of the", "df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean up the final", "# add the categories back to the original df df.drop('categories', axis=1, inplace=True) df", "df \"\"\" messages = pd.read_csv(messages_filepath) categories = 
pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def", "as the filepath of the database to save the cleaned data '\\ 'to", "combined messages and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return", "saved to database!') else: print('Please provide the filepaths of the messages and categories", "messages_filepath (string): The file path of the messages csv categories_filepath (string): The file", "\"\"\"loads the specified message and category data Args: messages_filepath (string): The file path", "filepath of the database to save the cleaned data '\\ 'to as the", "dataframe): Cleaned dataframe with split categories \"\"\" # expand the categories column categories", "df (pandas dataframe): combined categories and messages df Returns: df (pandas dataframe): Cleaned", "in each value as an integer categories = categories.applymap(lambda s: int(s[-1])) # add", "path of the messages csv categories_filepath (string): The file path of the categories", "int(s[-1])) # add the categories back to the original df df.drop('categories', axis=1, inplace=True)", "df Returns: df (pandas dataframe): Cleaned dataframe with split categories \"\"\" # expand", "categories = df.categories.str.split(';', expand=True) row = categories[:1] # get the category names category_colnames", "add the categories back to the original df df.drop('categories', axis=1, inplace=True) df =", "engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath", "save the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace')", "the database to save the cleaned data '\\ 'to as the third argument.", "cv Returns: df (pandas dataframe): The combined messages and categories df \"\"\" messages", "main(): if len(sys.argv) == 4: 
messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES:", "Args: df (pandas dataframe): combined categories and messages df Returns: df (pandas dataframe):", "axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0,", "the first and second argument respectively, as '\\ 'well as the filepath of", "else: print('Please provide the filepaths of the messages and categories '\\ 'datasets as", "column categories = df.categories.str.split(';', expand=True) row = categories[:1] # get the category names", "category_colnames # get only the last value in each value as an integer", "to a sqlite db Args: df (pandas dataframe): The cleaned dataframe database_filename (string):", "split categories \"\"\" # expand the categories column categories = df.categories.str.split(';', expand=True) row", "to save the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False,", "expand=True) row = categories[:1] # get the category names category_colnames = row.applymap(lambda s:", "= create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) == 4:", "database to save the cleaned data '\\ 'to as the third argument. 
\\n\\nExample:", "print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages", "def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category data Args: messages_filepath (string):", "pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data: - drops duplicates", "# get only the last value in each value as an integer categories", "dataframe): The combined messages and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories =", "s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the last value in", "messages missing classes - cleans up the categories column Args: df (pandas dataframe):", "return df def save_data(df, database_filename): \"\"\"Saves the resulting data to a sqlite db", "engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv) ==", "4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath,", "df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to", "messages and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages,", "as the third argument. 
\\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db') if", "of the messages csv categories_filepath (string): The file path of the categories cv", "(pandas dataframe): Cleaned dataframe with split categories \"\"\" # expand the categories column", "sqlite db Args: df (pandas dataframe): The cleaned dataframe database_filename (string): the file", "Args: messages_filepath (string): The file path of the messages csv categories_filepath (string): The", "clean_data(df): \"\"\"Cleans the data: - drops duplicates - removes messages missing classes -", "the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def", "df (pandas dataframe): The combined messages and categories df \"\"\" messages = pd.read_csv(messages_filepath)", "path of the categories cv Returns: df (pandas dataframe): The combined messages and", "categories, on='id') def clean_data(df): \"\"\"Cleans the data: - drops duplicates - removes messages", "inplace=True) df = pd.concat([df, categories], axis=1) # clean up the final data df.drop_duplicates(subset='message',", "file path of the categories cv Returns: df (pandas dataframe): The combined messages", "categories back to the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories],", "data: - drops duplicates - removes messages missing classes - cleans up the", "the resulting data to a sqlite db Args: df (pandas dataframe): The cleaned", "= clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!')", "def clean_data(df): \"\"\"Cleans the data: - drops duplicates - removes messages missing classes", "save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of", "db Args: df (pandas dataframe): The 
cleaned dataframe database_filename (string): the file path", "pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified", "print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data", "categories], axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2,", "categories_filepath): \"\"\"loads the specified message and category data Args: messages_filepath (string): The file", "categories.columns = category_colnames # get only the last value in each value as", "respectively, as '\\ 'well as the filepath of the database to save the", "engine.dispose() def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading", "the specified message and category data Args: messages_filepath (string): The file path of", "The cleaned dataframe database_filename (string): the file path to save the db Returns:", "up the categories column Args: df (pandas dataframe): combined categories and messages df", "= sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath,", "categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned", "save_data(df, database_filename): \"\"\"Saves the resulting data to a sqlite db Args: df (pandas", "of the categories cv Returns: df (pandas dataframe): The combined messages and categories", "of the messages and categories '\\ 'datasets as the first and second argument", "up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, 
inplace=True) return df", "pd.read_csv(messages_filepath) categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data:", "Cleaned dataframe with split categories \"\"\" # expand the categories column categories =", "the last value in each value as an integer categories = categories.applymap(lambda s:", "= row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the last", "def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n", "return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data: - drops duplicates -", "last value in each value as an integer categories = categories.applymap(lambda s: int(s[-1]))", "column Args: df (pandas dataframe): combined categories and messages df Returns: df (pandas", "drops duplicates - removes messages missing classes - cleans up the categories column", "# get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns =", "back to the original df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1)", "def save_data(df, database_filename): \"\"\"Saves the resulting data to a sqlite db Args: df", "df.related.replace(2, 0, inplace=True) return df def save_data(df, database_filename): \"\"\"Saves the resulting data to", "category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the", "combined categories and messages df Returns: df (pandas dataframe): Cleaned dataframe with split", "\"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if len(sys.argv)", "classes - cleans up the categories column Args: df (pandas dataframe): combined categories", "The file path 
of the categories cv Returns: df (pandas dataframe): The combined", "(pandas dataframe): The combined messages and categories df \"\"\" messages = pd.read_csv(messages_filepath) categories", "\"\"\"Cleans the data: - drops duplicates - removes messages missing classes - cleans", "get only the last value in each value as an integer categories =", "len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES:", "\\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db') if __name__ == '__main__': main()", "as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message", "# expand the categories column categories = df.categories.str.split(';', expand=True) row = categories[:1] #", "inplace=True) return df def save_data(df, database_filename): \"\"\"Saves the resulting data to a sqlite", "the file path to save the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename)", "database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the", "get the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames", "the category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames #", "the categories column Args: df (pandas dataframe): combined categories and messages df Returns:", "cleaned dataframe database_filename (string): the file path to save the db Returns: None", "category names category_colnames = row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get", "load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, 
database_filepath)", "path to save the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine,", "= pd.concat([df, categories], axis=1) # clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames,", "= categories.applymap(lambda s: int(s[-1])) # add the categories back to the original df", "filepaths of the messages and categories '\\ 'datasets as the first and second", "argument respectively, as '\\ 'well as the filepath of the database to save", "= load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df,", "csv categories_filepath (string): The file path of the categories cv Returns: df (pandas", "only the last value in each value as an integer categories = categories.applymap(lambda", "the cleaned data '\\ 'to as the third argument. \\n\\nExample: python process_data.py '\\", "and category data Args: messages_filepath (string): The file path of the messages csv", "categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE:", "the data: - drops duplicates - removes messages missing classes - cleans up", "file path to save the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages',", "data Args: messages_filepath (string): The file path of the messages csv categories_filepath (string):", "category data Args: messages_filepath (string): The file path of the messages csv categories_filepath", "database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df =", "'datasets as the first and second argument respectively, as '\\ 'well as the", "- cleans up the categories column Args: df (pandas dataframe): combined 
categories and", "provide the filepaths of the messages and categories '\\ 'datasets as the first", "sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category data", "categories column categories = df.categories.str.split(';', expand=True) row = categories[:1] # get the category", "None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose() def main(): if", "the filepaths of the messages and categories '\\ 'datasets as the first and", "{}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving", "# clean up the final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True)", "final data df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return df def save_data(df,", "pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data: - drops duplicates - removes", "message and category data Args: messages_filepath (string): The file path of the messages", "if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n", "messages and categories '\\ 'datasets as the first and second argument respectively, as", "categories cv Returns: df (pandas dataframe): The combined messages and categories df \"\"\"", "the messages csv categories_filepath (string): The file path of the categories cv Returns:", "missing classes - cleans up the categories column Args: df (pandas dataframe): combined", "categories \"\"\" # expand the categories column categories = df.categories.str.split(';', expand=True) row =", "clean up the final data 
df.drop_duplicates(subset='message', inplace=True) df.dropna(subset=category_colnames, inplace=True) df.related.replace(2, 0, inplace=True) return", "dataframe with split categories \"\"\" # expand the categories column categories = df.categories.str.split(';',", "s: int(s[-1])) # add the categories back to the original df df.drop('categories', axis=1,", "clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else:", "df df.drop('categories', axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # clean up the", "as the first and second argument respectively, as '\\ 'well as the filepath", "{}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths", "and second argument respectively, as '\\ 'well as the filepath of the database", "categories column Args: df (pandas dataframe): combined categories and messages df Returns: df", "the db Returns: None \"\"\" engine = create_engine('sqlite:///'+database_filename) df.to_sql('labeled_messages', engine, index=False, if_exists='replace') engine.dispose()", "create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category data Args: messages_filepath", "as an integer categories = categories.applymap(lambda s: int(s[-1])) # add the categories back", "DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the", "'well as the filepath of the database to save the cleaned data '\\", "categories = pd.read_csv(categories_filepath) return pd.merge(messages, categories, on='id') def clean_data(df): \"\"\"Cleans the data: -", "a sqlite db Args: df (pandas dataframe): The cleaned dataframe database_filename (string): the", "database!') else: print('Please provide the filepaths of the messages and 
categories '\\ 'datasets", "the third argument. \\n\\nExample: python process_data.py '\\ 'disaster_messages.csv disaster_categories.csv '\\ 'DisasterResponse.db') if __name__", "sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads", ":].tolist() categories.columns = category_colnames # get only the last value in each value", "df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\\n DATABASE: {}'.format(database_filepath))", "categories_filepath (string): The file path of the categories cv Returns: df (pandas dataframe):", "print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning", "messages df Returns: df (pandas dataframe): Cleaned dataframe with split categories \"\"\" #", "import create_engine def load_data(messages_filepath, categories_filepath): \"\"\"loads the specified message and category data Args:", "categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df", "the filepath of the database to save the cleaned data '\\ 'to as", "df def save_data(df, database_filename): \"\"\"Saves the resulting data to a sqlite db Args:", "row.applymap(lambda s: s[:-2]).iloc[0, :].tolist() categories.columns = category_colnames # get only the last value", "(string): the file path to save the db Returns: None \"\"\" engine =", "{}\\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df =" ]
[ "\" is started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks:", "started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks: delete_older_than =", "delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\",", "task.get(\"additional_query\", None): query = query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\"", "collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after,", "run(self): self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for", "query = query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__,", "tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks", "datetime import datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread import StoppableThread class", "self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete() self.logger.info(self.display_name +", "datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def", "} ) def run(self): self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every): current_time", "self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\":", "super(MongoPruner, self).__init__(logger=self.logger, 
name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( {", "def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every =", "import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger", "is started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks: delete_older_than", "self._tasks: delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if", "self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for task", "task in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\":", "import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name =", "task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query = query", "- task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query =", "None): query = query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" %", "mongoengine import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)):", "tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None", "\"__lt\": delete_older_than}) if 
task.get(\"additional_query\", None): query = query & task[\"additional_query\"] self.logger.debug( \"Removing %ss", "logging from datetime import datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread import", "<filename>bartender/mongo_pruner.py<gh_stars>0 import logging from datetime import datetime, timedelta from mongoengine import Q from", "def run(self): self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every): current_time = datetime.utcnow()", "collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name +", "brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name", "delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query,", "= datetime.utcnow() for task in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query =", "additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, }", "current_time = datetime.utcnow() for task in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query", "Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def", "self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner,", "= \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger,", "if task.get(\"additional_query\", None): query = query & 
task[\"additional_query\"] self.logger.debug( \"Removing %ss older than", "= logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or", "run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None,", "[] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append(", "self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\":", "= query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than))", "task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete() self.logger.info(self.display_name", "for task in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] +", "\"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\")", "current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query", "class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\"", "run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks =", "= tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def 
add_task( self, collection=None, field=None, delete_after=None,", "timedelta from mongoengine import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self,", "field, \"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name + \" is", "= current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None):", "\"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name", "= run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self,", "): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, } )", ") def run(self): self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every): current_time =", "delete_older_than}) if task.get(\"additional_query\", None): query = query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older", "import datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread):", "Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query = query & task[\"additional_query\"] self.logger.debug(", "def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\":", "older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete() self.logger.info(self.display_name + \" is stopped\")", "self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after, 
\"additional_query\": additional_query, } ) def", "name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection,", "= Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query = query & task[\"additional_query\"]", "\"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete() self.logger.info(self.display_name + \"", "__init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds()", "in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"] + \"__lt\": delete_older_than})", "+ \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query = query & task[\"additional_query\"] self.logger.debug( \"Removing", "field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\":", "\"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name + \" is started\")", "logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks or []", "MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every", "from mongoengine import Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None,", "self.logger = logging.getLogger(__name__) self.display_name = \"Mongo Pruner\" self._run_every = run_every.total_seconds() self._tasks = tasks", "StoppableThread class 
MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__) self.display_name = \"Mongo", "query & task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) )", "while not self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks: delete_older_than = current_time", "from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger = logging.getLogger(__name__)", "\"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name + \"", "%ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete() self.logger.info(self.display_name + \" is", "delete_after, \"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name + \" is started\") while", "+ \" is started\") while not self.wait(self._run_every): current_time = datetime.utcnow() for task in", "\"additional_query\": additional_query, } ) def run(self): self.logger.info(self.display_name + \" is started\") while not", "{ \"collection\": collection, \"field\": field, \"delete_after\": delete_after, \"additional_query\": additional_query, } ) def run(self):", "self._run_every = run_every.total_seconds() self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task(", "self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks: delete_older_than = current_time - task[\"delete_after\"]", "or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None, field=None, delete_after=None, additional_query=None ):", "add_task( self, collection=None, 
field=None, delete_after=None, additional_query=None ): self._tasks.append( { \"collection\": collection, \"field\": field,", "import logging from datetime import datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread", "additional_query, } ) def run(self): self.logger.info(self.display_name + \" is started\") while not self.wait(self._run_every):", "not self.wait(self._run_every): current_time = datetime.utcnow() for task in self._tasks: delete_older_than = current_time -", "datetime.utcnow() for task in self._tasks: delete_older_than = current_time - task[\"delete_after\"] query = Q(**{task[\"field\"]", "Q from brewtils.stoppable_thread import StoppableThread class MongoPruner(StoppableThread): def __init__(self, tasks=None, run_every=timedelta(minutes=15)): self.logger =", "query = Q(**{task[\"field\"] + \"__lt\": delete_older_than}) if task.get(\"additional_query\", None): query = query &", "& task[\"additional_query\"] self.logger.debug( \"Removing %ss older than %s\" % (task[\"collection\"].__name__, str(delete_older_than)) ) task[\"collection\"].objects(query).delete()", "from datetime import datetime, timedelta from mongoengine import Q from brewtils.stoppable_thread import StoppableThread", "self._tasks = tasks or [] super(MongoPruner, self).__init__(logger=self.logger, name=\"Remover\") def add_task( self, collection=None, field=None," ]
[ "route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t", "# index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower()", "import render_template from threading import Thread from data import * # config app", "jsonify from flask.templating import render_template from threading import Thread from data import *", "sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50':", "@app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices = {", "send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\",", "# config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home", "config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route", "f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby):", "from flask import Flask, jsonify from flask.templating import render_template from threading import Thread", "render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby =", "@app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" 
@app.route(\"/help\") def help(): return render_template(\"help.html\") #", "app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\"", "sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices: return jsonify(indices[index]) # all", "help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower()", "def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices = { 'it':", "= str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\",", "fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\",", "return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\")", "fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices:", "sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby),", "index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices", "jsonify(indices[index]) # all index route 
@app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0',", "# home route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help():", "sortby) } if index in indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\")", "{ 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby),", "'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg':", "= str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\",", "fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices: return jsonify(indices[index]) #", "Flask, jsonify from flask.templating import render_template from threading import Thread from data import", "False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def home(): return render_template(\"index.html\") #", "* # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True #", "'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma':", "sortby): index = str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby),", "'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in 
indices: return jsonify(indices[index])", "@app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t =", "def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run)", "import Flask, jsonify from flask.templating import render_template from threading import Thread from data", "import Thread from data import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] =", "run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if __name__ == \"__main__\":", "= True # home route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\")", "render_template from threading import Thread from data import * # config app =", "indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto':", "str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby),", "threading import Thread from data import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"]", "flask import Flask, jsonify from flask.templating import render_template from threading import Thread from", "app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if __name__ == \"__main__\": main()", "home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index route", "sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index", "return 
render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby", "indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def", "def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index", "= Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def", "from data import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"]", "} if index in indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def", "True # home route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def", "app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def home(): return", "def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if __name__ ==", "# f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index,", "= False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def home(): return render_template(\"index.html\")", "return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if", "index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def 
send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main():", "if index in indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data():", "def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index =", "from threading import Thread from data import * # config app = Flask(__name__)", "fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\",", "in indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data())", "route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index = str(index).lower() sortby = str(sortby).lower() indices =", "index = str(index).lower() sortby = str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank':", "@app.route(\"/help\") def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def send_nifty_index_data(index, sortby): index", "app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\")", "route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\")", "data import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] =", 
"flask.templating import render_template from threading import Thread from data import * # config", "return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run():", "sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby)", "# all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080)", "index in indices: return jsonify(indices[index]) # all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return", "fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices: return jsonify(indices[index]) # all index route", "jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start() if __name__", "'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices: return jsonify(indices[index]) # all index", "home route @app.route(\"/\") def home(): return render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return", "import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True", "fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if", "'50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) }", "send_all_nifty_indices_data(): return 
jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def main(): t = Thread(target=run) t.start()", "sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in indices: return", "render_template(\"index.html\") # f\"/{KEY}/data/nifty/index/all/sort/sortby\" @app.route(\"/help\") def help(): return render_template(\"help.html\") # index route @app.route(f\"/{KEY}/data/nifty/<string:index>/all/sort/<string:sortby>\") def", "from flask.templating import render_template from threading import Thread from data import * #", "all index route @app.route(f\"/{KEY}/data/nifty/indices/all\") def send_all_nifty_indices_data(): return jsonify(fetch_all_indices_data()) def run(): app.run(host='0.0.0.0', port=8080) def", "Thread from data import * # config app = Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False", "Flask(__name__) app.config[\"JSON_SORT_KEYS\"] = False app.config[\"JSONIFY_PRETTYPRINT_REGULAR\"] = True # home route @app.route(\"/\") def home():", "str(sortby).lower() indices = { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby),", "'auto': fetch_nifty_index_data(\"auto\", sortby), 'pharma': fetch_nifty_index_data(\"pharma\", sortby), 'fmcg': fetch_nifty_index_data(\"fmcg\", sortby) } if index in", "= { 'it': fetch_nifty_index_data(\"it\", sortby), 'bank': fetch_nifty_index_data(\"bank\", sortby), '50': fetch_nifty_index_data(\"50\", sortby), 'auto': fetch_nifty_index_data(\"auto\"," ]
[]
[ "pyforchange.egg.resources.modules import * from pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions", "from pyforchange.egg.resources.modules import * from pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from", "pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import * from pyforchange.egg.app", "import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import * from pyforchange.egg.app import", "* from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import * from pyforchange.egg.app import *", "from pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import * from", "* from pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import *", "import * from pyforchange.egg.resources.console import * from pyforchange.egg.resources.constants import * from pyforchange.egg.resources.extensions import" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\"\"\" Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\"", "License. # You may obtain a copy of the License at # #", "governing permissions and # limitations under the License. from google.cloud import storage import", "serving of gzipped assets to clients who can decompress themselves, both the content", "content type and content encoding meta data need to be set on JSON", "Most methods of transferring objects into a bucket do not correctly set this", "set this meta data, so we have this utility to correct for this", "google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip", "the fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. 
# You may obtain a copy of the License", "meta data, so we have this utility to correct for this after the", "bucket do not correctly set this meta data, so we have this utility", "Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix In", "sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to json files", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\":", "this file except in compliance with the License. # You may obtain a", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\"", "you may not use this file except in compliance with the License. #", "blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ ==", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"):", "utility to correct for this after the fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client", "set on JSON objects. Most methods of transferring objects into a bucket do", "blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) !=", "limitations under the License. 
from google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name):", "order to allow for decompressive transcoding and serving of gzipped assets to clients", "prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to json files in a bucket", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "need to be set on JSON objects. Most methods of transferring objects into", "apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to json files in a", "# Copyright 2020 Google LLC # # Licensed under the Apache License, Version", "in compliance with the License. # You may obtain a copy of the", "2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "gzipped assets to clients who can decompress themselves, both the content type and", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "who can decompress themselves, both the content type and content encoding meta data", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "both the content type and content encoding meta data need to be set", "not use this file except in compliance with the License. 
# You may", "clients who can decompress themselves, both the content type and content encoding meta", "= storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type !=", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage: apply_json_meta", "a bucket do not correctly set this meta data, so we have this", "this utility to correct for this after the fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\"", "assets to clients who can decompress themselves, both the content type and content", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage: apply_json_meta [bucket_name]", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "In order to allow for decompressive transcoding and serving of gzipped assets to", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "correctly set this meta data, so we have this utility to correct for", "\"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "bucket.list_blobs(prefix=prefix_name): 
if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition !=", "to json files in a bucket prefix In order to allow for decompressive", "encoding meta data need to be set on JSON objects. Most methods of", "and serving of gzipped assets to clients who can decompress themselves, both the", "OF ANY KIND, either express or implied. # See the License for the", "License. from google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type", "gzip Content-Encoding to json files in a bucket prefix In order to allow", "so we have this utility to correct for this after the fact. See", "2.0 (the \"License\"); # you may not use this file except in compliance", "to clients who can decompress themselves, both the content type and content encoding", "storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to", "the content type and content encoding meta data need to be set on", "storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type", "Content-Type and gzip Content-Encoding to json files in a bucket prefix In order", "# you may not use this file except in compliance with the License.", "for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\"", "correct for this after the fact. 
See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client()", "agreed to in writing, software # distributed under the License is distributed on", "\"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage:", "blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage: apply_json_meta [bucket_name] [prefix_name]\") else:", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "decompress themselves, both the content type and content encoding meta data need to", "\"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__", "can decompress themselves, both the content type and content encoding meta data need", "storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding !=", "bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or", "(the \"License\"); # you may not use this file except in compliance with", "on JSON objects. 
Most methods of transferring objects into a bucket do not", "import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to json", "blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or", "transcoding and serving of gzipped assets to clients who can decompress themselves, both", "data, so we have this utility to correct for this after the fact.", "# # Unless required by applicable law or agreed to in writing, software", "a bucket prefix In order to allow for decompressive transcoding and serving of", "not correctly set this meta data, so we have this utility to correct", "be set on JSON objects. Most methods of transferring objects into a bucket", "express or implied. # See the License for the specific language governing permissions", "the License. from google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies", "fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "for the specific language governing permissions and # limitations under the License. from", "except in compliance with the License. # You may obtain a copy of", "def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding to json files in", "or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding =", "by applicable law or agreed to in writing, software # distributed under the", "of transferring objects into a bucket do not correctly set this meta data,", "for this after the fact. 
See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "and # limitations under the License. from google.cloud import storage import sys def", "!= \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "data need to be set on JSON objects. Most methods of transferring objects", "either express or implied. # See the License for the specific language governing", "the specific language governing permissions and # limitations under the License. from google.cloud", "to correct for this after the fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "for decompressive transcoding and serving of gzipped assets to clients who can decompress", "blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch()", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "transferring objects into a bucket do not correctly set this meta data, so", "!= \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\"", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv)", "language governing permissions and # limitations under the License. from google.cloud import storage", "blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\"", "= \"gzip\" blob.content_disposition = \"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3):", "methods of transferring objects into a bucket do not correctly set this meta", "file except in compliance with the License. # You may obtain a copy", "= storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding", "https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")):", "this after the fact. See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket =", "after the fact. 
See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "under the License. from google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\"", "in a bucket prefix In order to allow for decompressive transcoding and serving", "import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and gzip Content-Encoding", "specific language governing permissions and # limitations under the License. from google.cloud import", "from google.cloud import storage import sys def apply_json_metadata(bucket_name, prefix_name): \"\"\" Applies Content-Type and", "the License. # You may obtain a copy of the License at #", "!= \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition", "permissions and # limitations under the License. from google.cloud import storage import sys", "to in writing, software # distributed under the License is distributed on an", "themselves, both the content type and content encoding meta data need to be", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "json files in a bucket prefix In order to allow for decompressive transcoding", "type and content encoding meta data need to be set on JSON objects.", "meta data need to be set on JSON objects. Most methods of transferring", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name):", "\"License\"); # you may not use this file except in compliance with the", "we have this utility to correct for this after the fact. See also:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "to allow for decompressive transcoding and serving of gzipped assets to clients who", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "of gzipped assets to clients who can decompress themselves, both the content type", "\"gzip\" or blob.content_disposition != \"inline\"): blob.content_type = \"application/json\" blob.content_encoding = \"gzip\" blob.content_disposition =", "applicable law or agreed to in writing, software # distributed under the License", "bucket prefix In order to allow for decompressive transcoding and serving of gzipped", "Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0", "Content-Encoding to json files in a bucket prefix In order to allow for", "prefix In order to allow for decompressive transcoding and serving of gzipped assets", "if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage: apply_json_meta [bucket_name] [prefix_name]\") else: apply_json_metadata(sys.argv[1],sys.argv[2])", "<filename>src/scripts/apply_json_metadata.py # Copyright 2020 Google LLC # # Licensed under the Apache License,", "if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type =", "this meta data, so we have this utility to correct for this after", "to be set on JSON objects. Most methods of transferring objects into a", "objects. 
Most methods of transferring objects into a bucket do not correctly set", "or agreed to in writing, software # distributed under the License is distributed", "\"inline\" blob.patch() if __name__ == \"__main__\": if(len(sys.argv) != 3): print(\"Usage: apply_json_meta [bucket_name] [prefix_name]\")", "or implied. # See the License for the specific language governing permissions and", "# limitations under the License. from google.cloud import storage import sys def apply_json_metadata(bucket_name,", "\"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob in bucket.list_blobs(prefix=prefix_name): if(blob.name.endswith(\"json\")): print(blob.name)", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "allow for decompressive transcoding and serving of gzipped assets to clients who can", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "decompressive transcoding and serving of gzipped assets to clients who can decompress themselves,", "See also: https://cloud.google.com/storage/docs/transcoding \"\"\" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) for blob in", "have this utility to correct for this after the fact. See also: https://cloud.google.com/storage/docs/transcoding", "with the License. # You may obtain a copy of the License at", "content encoding meta data need to be set on JSON objects. 
Most methods", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "and gzip Content-Encoding to json files in a bucket prefix In order to", "into a bucket do not correctly set this meta data, so we have", "do not correctly set this meta data, so we have this utility to", "JSON objects. Most methods of transferring objects into a bucket do not correctly", "objects into a bucket do not correctly set this meta data, so we", "files in a bucket prefix In order to allow for decompressive transcoding and", "print(blob.name) if(blob.content_type != \"application/json\" or blob.content_encoding != \"gzip\" or blob.content_disposition != \"inline\"): blob.content_type", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "and content encoding meta data need to be set on JSON objects. Most" ]
[]
[ "yt ds = yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y')) slc.set_log(('connect1','vel_y'), False)", "ds = yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y')) slc.set_log(('connect1','vel_y'), False) slc.set_width((1,", "import yt ds = yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y')) slc.set_log(('connect1','vel_y'),", "= yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y')) slc.set_log(('connect1','vel_y'), False) slc.set_width((1, 1))", "<filename>quad9_plot.py import yt ds = yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y'))", "yt.load(\"/home/lindsayad/projects/moose/modules/navier_stokes/tests/ins/lid_driven/gold/lid_driven_out.e\", step=-1) slc = yt.SlicePlot(ds, 'z', ('connect1', 'vel_y')) slc.set_log(('connect1','vel_y'), False) slc.set_width((1, 1)) slc.save()" ]
[ "Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email) except Exception as", "\"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED,", "class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB,", "LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending", "_data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass", "either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued':", "LogReason, LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE,", "LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce':", "self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception", "The sending status of the recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\", or", "LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of the recipient - either", "\"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid':", "as e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def", ".enums import LogReason, LogStatus from .models import Log class 
MandrillLogger(): REASON_TRANSLATOR = {", "reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str,", "= mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None))", "return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self,", "{ 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def", "self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def", "or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED,", "'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit':", "LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for recipient in email.to: _data", "from django.contrib.auth import get_user_model from .enums import LogReason, LogStatus from .models import Log", "= email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id']", "django.contrib.auth import get_user_model from .enums import LogReason, LogStatus from .models import Log class", "get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, 
reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT)", "_data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id']", "'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of the recipient -", "print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status):", "- either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT,", "LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None):", "import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM,", "email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] =", "MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom':", "return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return", "'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, }", "= get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e) return None def", "def get_user_from_email(self, email): user = get_user_model() 
try: return user.objects.get(email=email) except Exception as e:", "_data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason',", "get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e)", "_data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0]", "def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email)", "= self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status']", "# The sending status of the recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\",", "get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e) return None def get_reason_enum(self,", "\"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected':", "'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def", "import LogReason, LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce':", "{ 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER,", "= self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except 
Exception as e: pass self.save_log(_data)", "None)) except Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self,", "Exception as e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA)", "def log_email(self, email): for recipient in email.to: _data = {} _data['template'] = email.template_name", "= self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data)", "LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of", "STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID,", "= email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason']", "= { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender':", ".models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam':", "} # The sending status of the recipient - either \"sent\", \"queued\", \"scheduled\",", "LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID,", "sending status of the recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\"", "LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': 
LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED,", "get_user_model from .enums import LogReason, LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR", "None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def save_log(self,", "'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of the", "reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def", "LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self,", "status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str]", "user.objects.get(email=email) except Exception as e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR,", "LogReason.RULE, } # The sending status of the recipient - either \"sent\", \"queued\",", "log_email(self, email): for recipient in email.to: _data = {} _data['template'] = email.template_name _data['email']", "save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email) except", "self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict,", "LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': 
LogReason.UNSIGNED, 'rule': LogReason.RULE, } #", "pass def log_email(self, email): for recipient in email.to: _data = {} _data['template'] =", "self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def", "LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for recipient in", "'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned':", "'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status", "'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email):", "return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except", "_data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e: pass self.save_log(_data) def save_log(self, _data):", "try: return user.objects.get(email=email) except Exception as e: print(e) return None def get_reason_enum(self, reason):", "import get_user_model from .enums import LogReason, LogStatus from .models import Log class MandrillLogger():", "user = get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e) return None", "except Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email):", "def __init__(self): pass def log_email(self, email): for recipient in email.to: _data = {}", "email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: 
mandrill_response = email.mandrill_response[0] _data['mandrill_id'] =", "LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass", "recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] =", "self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return", "LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for", "def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception as e: return", "Exception as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user", "status of the recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR", "\"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED,", "REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM,", "e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model()", "self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] =", "'rule': LogReason.RULE, } # The sending status of the recipient - either \"sent\",", "of the recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR =", "the recipient - either 
\"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = {", "_data = {} _data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try:", "Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub':", "for recipient in email.to: _data = {} _data['template'] = email.template_name _data['email'] = recipient", "LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception as e:", "'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for recipient in email.to:", "return user.objects.get(email=email) except Exception as e: print(e) return None def get_reason_enum(self, reason): return", "= {} _data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response", "LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT,", "'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self):", "= { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled': LogStatus.SCHEDULED, 'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, }", "= recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data']", "email): user = get_user_model() try: return user.objects.get(email=email) except Exception as e: print(e) return", "'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE, 'spam': LogReason.SPAM, 'unsub': LogReason.UNSUB, 
'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid':", "email): for recipient in email.to: _data = {} _data['template'] = email.template_name _data['email'] =", "} def __init__(self): pass def log_email(self, email): for recipient in email.to: _data =", "def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status,", "recipient in email.to: _data = {} _data['template'] = email.template_name _data['email'] = recipient _data['user']", "_data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception", "LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The sending status of the recipient", "'rejected': LogStatus.REJECTED, 'invalid': LogStatus.INVALID, } def __init__(self): pass def log_email(self, email): for recipient", "recipient - either \"sent\", \"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent':", "'unsub': LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule':", "pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try:", "status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception as", "as e: pass self.save_log(_data) def save_log(self, _data): Log.objects.create(**_data) def get_user_from_email(self, email): user =", "{} _data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient) try: mandrill_response =", 
"mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None))", "try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status',", "from .enums import LogReason, LogStatus from .models import Log class MandrillLogger(): REASON_TRANSLATOR =", "\"queued\", \"scheduled\", \"rejected\", or \"invalid\" STATUS_TRANSLATOR = { 'sent': LogStatus.SENT, 'queued': LogStatus.QUEUED, 'scheduled':", "_data): Log.objects.create(**_data) def get_user_from_email(self, email): user = get_user_model() try: return user.objects.get(email=email) except Exception", "from .models import Log class MandrillLogger(): REASON_TRANSLATOR = { 'hard-bounce': LogReason.HARD_BOUNCE, 'soft-bounce': LogReason.SOFT_BOUNCE,", "__init__(self): pass def log_email(self, email): for recipient in email.to: _data = {} _data['template']", "mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as e:", "def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try:", "except Exception as e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason,", "= mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except Exception as", "_data['user'] = self.get_user_from_email(recipient) try: mandrill_response = email.mandrill_response[0] _data['mandrill_id'] = mandrill_response['_id'] 
_data['meta_data'] = mandrill_response", "email.to: _data = {} _data['template'] = email.template_name _data['email'] = recipient _data['user'] = self.get_user_from_email(recipient)", "'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE, } # The", "None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR,", "translate_enum(self, _dict, _str, _default=None): try: return _dict[_str] except Exception as e: return _default", "get_status_enum(self, status): return self.translate_enum(self.STATUS_TRANSLATOR, status, LogStatus.DEFAULT) def translate_enum(self, _dict, _str, _default=None): try: return", "mandrill_response['_id'] _data['meta_data'] = mandrill_response _data['status'] = self.get_status_enum(mandrill_response.get('status', None)) _data['reason'] = self.get_reason_enum(mandrill_response.get('reject_reason', None)) except", "in email.to: _data = {} _data['template'] = email.template_name _data['email'] = recipient _data['user'] =", "LogReason.UNSUB, 'custom': LogReason.CUSTOM, 'invalid-sender': LogReason.INVALID_SENDER, 'invalid': LogReason.INVALID, 'test-mode-limit': LogReason.TEST_MODE_LIMIT, 'unsigned': LogReason.UNSIGNED, 'rule': LogReason.RULE,", "e: print(e) return None def get_reason_enum(self, reason): return self.translate_enum(self.REASON_TRANSLATOR, reason, LogReason.NA) def get_status_enum(self," ]
[ "<integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer>", "</array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs):", "BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key>", "<key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key>", "<integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string>", "<key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\"", "\"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] =", "<key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key>", "<true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer>", "<string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> 
<key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer>", "<key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key>", "<integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/>", "<key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class", "<dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer>", "<key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key>", "<reponame>fullscreennl/bullettime import PhysicsMixin import ID BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer>", "<integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS", "import PhysicsMixin import ID BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key>", "<key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key>", "</dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class 
ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def", "<integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array>", "<key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict>", "<integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real>", "\"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key>", "PhysicsMixin import ID BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer>", "JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs", "self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params)", "\"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','')", "__init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def", "<key>width</key> 
<integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key>", "<string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer>", "</dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key>", "<key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array>", "<key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key>", "def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next()", "<array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key>", "<dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer>", "= \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs 
self.params['name']", "ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] =", "<real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\"", "\"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params =", "= kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return(", "self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self):", "<key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS", "<integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string>", "ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ == \"__main__\": print \"no test", "self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ == \"__main__\": print", "CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\"", "\"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, 
JOINTS%self.params,CONTACTS%self.params) if __name__", "<key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key>", "class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__']", "<key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string>", "<true/> </dict> </array> </dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin):", "= \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params = kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs)", "</dict> \"\"\" JOINTS = \"\"\"\"\"\" CONTACTS = \"\"\"\"\"\" class ABTargetScrollFocusSprite(PhysicsMixin.PhysicsMixin): def __init__(self,**kwargs): self.params", "<dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real>", "ID BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer>", "import ID BODIES = \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key>", "self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ ==", 
"<integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict>", "<key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key> <array> <dict> <key>x</key> <integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key>", "<string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\"", "= ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ == \"__main__\": print \"no", "<key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key>", "<integer>0</integer> <key>y</key> <integer>0</integer> <key>width</key> <integer>%(width)s</integer> <key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer>", "<real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS =", "def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ == \"__main__\": print \"no test avaiable\"", "<integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/>", "<key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict> \"\"\" JOINTS = 
\"\"\"\"\"\" CONTACTS =", "self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if __name__ == \"__main__\":", "kwargs self.params['name'] = \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params,", "<key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key>", "= \"\"\" <dict> <key>body</key> <dict> <key>x</key> <integer>%(x)s</integer> <key>y</key> <integer>%(y)s</integer> <key>width</key> <integer>10</integer> <key>height</key> <integer>10</integer>", "<integer>10</integer> <key>sheet_id</key> <integer>5</integer> <key>id</key> <integer>%(__objID__)s</integer> <key>name</key> <string>%(name)s</string> <key>classname</key> <string>%(classname)s</string> <key>static</key> <true/> </dict> <key>shapes</key>", "<key>height</key> <integer>%(height)s</integer> <key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict>", "= \"ScrollTarget\" self.process(kwargs) self.addDefault('classname','') self.params['__objID__'] = ID.next() def render(self): return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params) if", "<key>type</key> <string>circ</string> <key>friction</key> <real>1</real> <key>density</key> <integer>1</integer> <key>restitution</key> <real>0</real> <key>sensor</key> <true/> </dict> </array> </dict>" ]
[ ".gz documents from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList", "import re def timeit(method): def timed(*args, **kw): ts = time.time() result = method(*args,", "than distributing massive amounts to each core batchSize = 10000 chunks = (len(fileContentsList)-1)", "[] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for result in results:", "finish1 = time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1 if __name__", "fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # # English Sieving # docListEnglish", "# outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to check english", "for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is not None:", "gzip import json from langdetect import detect import time from pprint import pprint", "files: {len(fileContentsList)}') # Batch processing files rather than distributing massive amounts to each", "if 's2' in file] return manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return", "None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: # # print(f'{resultCount}", "file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get", "executor.map(convertToJson, batch) for result in results: if result is not None: docListJSON.append(result) else:", "# print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization',", "re def timeit(method): def timed(*args, **kw): ts = time.time() result = method(*args, **kw)", "import codecs import re def timeit(method): def timed(*args, **kw): ts = time.time() 
result", "results: if result is not None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time", "batch) # # resultCount = 1 # for result in results: # if", "print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files:", "{finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for", "ts = time.time() result = method(*args, **kw) te = time.time() print('%r %2.2f seconds'", "1 for i in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch", "from langdetect import detect import time from pprint import pprint import concurrent.futures import", "# for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time()", "title specified. 
return docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID:", "englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time", "outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') #", "as it's too general when it matches by itself docJSON = json.loads(json.dumps(file)) #", "codecs import re def timeit(method): def timed(*args, **kw): ts = time.time() result =", "return docJSON else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\")", "+ 1 batchCount = 1 for i in range(chunks): start = time.time() batch", "\"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file in", "= [] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for result in", "manifestFileList = [file for file in manifestFileList if 's2' in file] return manifestFileList", "{batchCount} - Size: {len(batch)}') # # English Sieving # docListEnglish = [] #", "# print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as", "concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for result in results: if result", "# # resultCount = 1 # for result in results: # if result", "result in results: # if result is not None: # # print(f'{resultCount} -", "Get list of .gz documents from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile", "\"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract", "# spelling 
problems...; removed automatic as it's too general when it matches by", "(re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def main(): s2CorpusList", "core batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount =", "docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to check", "# Get list of .gz documents from S2 manifest manifestFile = open(fileLocation, \"r\")", "= time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1 if __name__ ==", "print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English Title')", "{finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time() docKeyTermMatched = [] for doc", "else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus", "dict... 
# keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or", "timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList", "in results: if result is not None: docListJSON.append(result) else: pass finishJSONConvert = time.time()", "try: docJSON = json.loads(file) return docJSON except Exception as e: # print(f'ERROR: {e}')", "\"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk =", "file] return manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return docJSON except Exception", "time.time() print('%r %2.2f seconds' % (method.__name__, (te - ts))) return result return timed", "\"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to", "docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception", "time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 =", "removed automatic as it's too general when it matches by itself docJSON =", "= [file for file in manifestFileList if 's2' in file] return manifestFileList def", "it's too general when it matches by itself docJSON = 
json.loads(json.dumps(file)) # dumps", "= time.time() docKeyTermMatched = [] for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc)", "= (len(fileContentsList)-1) // batchSize + 1 batchCount = 1 for i in range(chunks):", "docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for result", "executor.map(getEnglishDoc, batch) # # resultCount = 1 # for result in results: #", "result is not None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to convert", "def getManifest(fileLocation): # Get list of .gz documents from S2 manifest manifestFile =", "for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() #", "fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list of .gz documents", "return timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close()", "= f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list", "e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation',", "to each core batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize + 1", "s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of", "for result in results: # if result is not None: # # print(f'{resultCount}", "for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList =", "= getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for 
s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl =", "{e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': #", "from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n')", "each core batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount", "- ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file):", "s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n')", "outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time", "getManifest(fileLocation): # Get list of .gz documents from S2 manifest manifestFile = open(fileLocation,", "[file for file in manifestFileList if 's2' in file] return manifestFileList def convertToJson(file):", "= manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList if", "print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor:", "print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1 if __name__ == '__main__': main()", "manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return docJSON except Exception as e:", "is dict... 
# keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower()))", "{docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR: {e}') pass def", "# outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') #", "outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}')", "# outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n')", "')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}')", "+= 1 # finishCheckEng = time.time() # outputFileEnglishDoc = 
codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") #", "(re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit", "as e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList =", "None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for", "= set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC", "EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing files rather", "concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch) # # resultCount = 1", "= open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for", "start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') #", "print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en':", "batchSize + 1 batchCount = 1 for i in range(chunks): 
start = time.time()", "for file in manifestFileList if 's2' in file] return manifestFileList def convertToJson(file): try:", "if result is not None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to", "1 # for result in results: # if result is not None: #", "result is not None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result) # else:", "if input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if", "chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount = 1 for i in", "time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish: #", "timed(*args, **kw): ts = time.time() result = method(*args, **kw) te = time.time() print('%r", "= file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list of .gz documents from", "def convertToJson(file): try: docJSON = json.loads(file) return docJSON except Exception as e: #", "Exception as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file)", "if result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched =", "= manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList if 's2' in file]", "problems...; removed automatic as it's too general when it matches by itself docJSON", "Matching start1 = time.time() 
docKeyTermMatched = [] for doc in docListJSON: #docListEnglish: result", "= fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch", "codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time", "start1 = time.time() docKeyTermMatched = [] for doc in docListJSON: #docListEnglish: result =", "fileContentsList def getManifest(fileLocation): # Get list of .gz documents from S2 manifest manifestFile", "= executor.map(convertToJson, batch) for result in results: if result is not None: docListJSON.append(result)", "@author: <NAME> \"\"\" from tqdm import tqdm import gzip import json from langdetect", "{len(batch)}') # # English Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as", "docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') #", "json.loads(json.dumps(file)) # dumps if input is dict... 
# keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern", "fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing", "docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def main(): s2CorpusList =", "= matchDocWithKeyTerms(doc) if result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0]", "# finishWriteToDisk = time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to", "pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return", "= s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close()", "seconds' % (method.__name__, (te - ts))) return result return timed @timeit def unzipS2Contents(url):", "\"\"\" @author: <NAME> \"\"\" from tqdm import tqdm import gzip import json from", "convertToJson(file): try: docJSON = json.loads(file) return docJSON except Exception as e: # print(f'ERROR:", "detect(docJSON[\"title\"]) == 'en': # can have errors if there isn't a title 
specified.", "if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass", "= 1 for i in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing", "# if result is not None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result)", "is not None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to convert to", "# keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern,", "english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with", "'summarizatio'] # spelling problems...; removed automatic as it's too general when it matches", "\"w\", \"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk", "% (method.__name__, (te - ts))) return result return timed @timeit def unzipS2Contents(url): f", "== 'en': # can have errors if there isn't a title specified. 
return", "time.time() docKeyTermMatched = [] for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if", "in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is not None: # print(result)", "ts))) return result return timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content", "return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e:", "# print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched", "manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file", "docJSON except Exception as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON", "'en': # can have errors if there isn't a title specified. return docJSON", "outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1 if", "amounts to each core batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize +", "= time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to write to", "result = method(*args, **kw) te = time.time() print('%r %2.2f seconds' % (method.__name__, (te", "# print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) ==", "'s2' in file] return manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return docJSON", "can have errors if there isn't a title specified. return docJSON # return", "by itself docJSON = json.loads(json.dumps(file)) # dumps if input is dict... 
# keyTermsMatched", "result = matchDocWithKeyTerms(doc) if result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname =", "with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch) for result in results: if", "print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic", "if there isn't a title specified. return docJSON # return f'Year: {docJSON[\"year\"]} -", "to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results", "pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in", "results: # if result is not None: # # print(f'{resultCount} - {result}') #", "2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList", "# return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as", "tqdm import tqdm import gzip import json from langdetect import detect import time", "Title') # pass # # resultCount += 1 # finishCheckEng = time.time() #", "batchCount = 1 for i in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize]", "return docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except", "manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\"", "Batch processing files rather than distributing massive amounts to each core batchSize =", "in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms 
{finish1-start1:0.1f}') batchCount", "with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch) # # resultCount =", "1 # finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for", "file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list of .gz documents from S2", "a title specified. return docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} -", "# for result in results: # if result is not None: # #", "['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic as it's", "for result in results: if result is not None: docListJSON.append(result) else: pass finishJSONConvert", "disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson, batch)", "# Key Term Matching start1 = time.time() docKeyTermMatched = [] for doc in", "time from pprint import pprint import concurrent.futures import codecs import re def timeit(method):", "docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}')", "- ts))) return result return timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb')", "{e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive',", "# # print(f'{resultCount} - Non-English Title') # pass # # resultCount += 1", "or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def main():", "scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar", "= 
json.loads(file) return docJSON except Exception as e: # print(f'ERROR: {e}') pass def", "return manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return docJSON except Exception as", "def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n')", "= time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish:", "results = executor.map(convertToJson, batch) for result in results: if result is not None:", "Term Matching start1 = time.time() docKeyTermMatched = [] for doc in docListJSON: #docListEnglish:", "matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] #", "not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\")", "**kw) te = time.time() print('%r %2.2f seconds' % (method.__name__, (te - ts))) return", "= codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time()", "Key Term Matching start1 = time.time() docKeyTermMatched = [] for doc in docListJSON:", "to check english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON =", "# # print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: # # print(f'{resultCount} -", "in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size:", "detect import time from pprint import pprint import concurrent.futures import codecs import re", "= 
json.loads(json.dumps(file)) # dumps if input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList))", "convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time() docKeyTermMatched =", "{docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR:", "batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # # English Sieving", "# resultCount = 1 # for result in results: # if result is", "MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar", "ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): #", "keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())):", "finishWriteToDisk = time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to write", "{s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}')", "# print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}')", "def main(): s2CorpusList = 
getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing:", "if detect(docJSON[\"title\"]) == 'en': # can have errors if there isn't a title", "= [] for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is", "s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl", "= method(*args, **kw) te = time.time() print('%r %2.2f seconds' % (method.__name__, (te -", "docJSON = json.loads(json.dumps(file)) # dumps if input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split('", "def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio']", "- Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR: {e}')", "errors if there isn't a title specified. return docJSON # return f'Year: {docJSON[\"year\"]}", "- Non-English Title') # pass # # resultCount += 1 # finishCheckEng =", "time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # # English", "batch {batchCount} - Size: {len(batch)}') # # English Sieving # docListEnglish = []", "json from langdetect import detect import time from pprint import pprint import concurrent.futures", "docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have errors if there", "not None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to convert to JSON", "import pprint import concurrent.futures import codecs import re def timeit(method): def timed(*args, **kw):", "print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # # English Sieving # docListEnglish =", "# Batch processing files rather than distributing massive amounts to each core batchSize", "'summeries', 'summarizatio'] # 
spelling problems...; removed automatic as it's too general when it", "else: pass finishJSONConvert = time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key", "to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time() docKeyTermMatched = []", "except Exception as e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) #", "%2.2f seconds' % (method.__name__, (te - ts))) return result return timed @timeit def", "Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor: # results =", "# # English Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor:", "file in manifestFileList if 's2' in file] return manifestFileList def convertToJson(file): try: docJSON", "pass # # resultCount += 1 # finishCheckEng = time.time() # outputFileEnglishDoc =", "in file] return manifestFileList def convertToJson(file): try: docJSON = json.loads(file) return docJSON except", "manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList if 's2' in", "= codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close()", "langdetect import detect import time from pprint import pprint import concurrent.futures import codecs", "in results: # if result is not None: # # print(f'{resultCount} - {result}')", "import detect import time from pprint import pprint import concurrent.futures import codecs import", "pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries',", "matchDocWithKeyTerms(doc) if result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched", "= time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] 
print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # #", "= executor.map(getEnglishDoc, batch) # # resultCount = 1 # for result in results:", "manifest manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList =", "in manifestFileList if 's2' in file] return manifestFileList def convertToJson(file): try: docJSON =", "docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English Title') # pass # #", "matches by itself docJSON = json.loads(json.dumps(file)) # dumps if input is dict... #", "resultCount = 1 # for result in results: # if result is not", "- Size: {len(batch)}') # # English Sieving # docListEnglish = [] # with", "input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern,", "<gh_stars>0 \"\"\" @author: <NAME> \"\"\" from tqdm import tqdm import gzip import json", "print(f'{resultCount} - Non-English Title') # pass # # resultCount += 1 # finishCheckEng", "f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e: #", "in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() # finishWriteToDisk = time.time() # print(f'Time to", "as executor: # results = executor.map(getEnglishDoc, batch) # # resultCount = 1 #", "- {result}') # docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English Title') #", "# 
results = executor.map(getEnglishDoc, batch) # # resultCount = 1 # for result", "= gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def", "write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results =", "# docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc,", "pass finishJSONConvert = time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term", "# resultCount += 1 # finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\",", "try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have errors if", "manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList if 's2' in file] return", "10000 chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount = 1 for i", "gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation):", "Non-English Title') # pass # # resultCount += 1 # finishCheckEng = time.time()", "print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...;", "massive amounts to each core batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize", "JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time() docKeyTermMatched = [] for", "finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in", "return docJSON except Exception as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try:", "# finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # 
for englishDocContent", "None: docListJSON.append(result) else: pass finishJSONConvert = time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}')", "s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') #", "print('%r %2.2f seconds' % (method.__name__, (te - ts))) return result return timed @timeit", "isn't a title specified. return docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]}", "executor: # results = executor.map(getEnglishDoc, batch) # # resultCount = 1 # for", "unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing files rather than distributing massive", "in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number", "f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list of .gz", "of .gz documents from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read()", "docKeyTermMatched = [] for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result", "spelling problems...; removed automatic as it's too general when it matches by itself", "print(f'Number of files: {len(fileContentsList)}') # Batch processing files rather than distributing massive amounts", "[] for doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is not", "outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 =", "S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile 
= manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList", "concurrent.futures import codecs import re def timeit(method): def timed(*args, **kw): ts = time.time()", "getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic", "tqdm import gzip import json from langdetect import detect import time from pprint", "of files: {len(fileContentsList)}') # Batch processing files rather than distributing massive amounts to", "'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic as it's too general", "time.time() # print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk", "open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file", "pprint import pprint import concurrent.futures import codecs import re def timeit(method): def timed(*args,", "1 batchCount = 1 for i in range(chunks): start = time.time() batch =", "json.loads(file) return docJSON except Exception as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file):", "# print(f'{resultCount} - Non-English Title') # pass # # resultCount += 1 #", "#docListEnglish: result = matchDocWithKeyTerms(doc) if result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname", "print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in", "from tqdm import tqdm import gzip import json from langdetect import detect import", "def timeit(method): def timed(*args, **kw): ts = time.time() result = method(*args, **kw) te", "s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", 
\"w\", \"utf-8\") for docMatched in docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1", "pprint import concurrent.futures import codecs import re def timeit(method): def timed(*args, **kw): ts", "@timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList =", "finishJSONConvert = time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching", "f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): # Get list of", "pass def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can", "# docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English Title') # pass #", "docKeyTermMatched: outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount +=", "def timed(*args, **kw): ts = time.time() result = method(*args, **kw) te = time.time()", "doc in docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is not None: #", "// batchSize + 1 batchCount = 1 for i in range(chunks): start =", "= fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}') # # English Sieving #", "return fileContentsList def getManifest(fileLocation): # Get list of .gz documents from S2 manifest", "except Exception as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON =", "print(f'Time to check english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON", "time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1 if __name__ == '__main__':", "'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed 
automatic as it's too general when", "= ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic as", "there isn't a title specified. return docJSON # return f'Year: {docJSON[\"year\"]} - Title:", "when it matches by itself docJSON = json.loads(json.dumps(file)) # dumps if input is", "return result return timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content =", "# keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed", "result in results: if result is not None: docListJSON.append(result) else: pass finishJSONConvert =", "keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic", "range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} - Size: {len(batch)}')", "(len(fileContentsList)-1) // batchSize + 1 batchCount = 1 for i in range(chunks): start", "@timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList:", "from pprint import pprint import concurrent.futures import codecs import re def timeit(method): def", "method(*args, **kw) te = time.time() print('%r %2.2f seconds' % (method.__name__, (te - ts)))", "= time.time() result = method(*args, **kw) te = time.time() print('%r %2.2f seconds' %", "[] # with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch) # #", "set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = 
\"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED:", "= 1 # for result in results: # if result is not None:", "English Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor: # results", "main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}')", "{finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor()", "# print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling", "Size: {len(batch)}') # # English Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor()", "executor: results = executor.map(convertToJson, batch) for result in results: if result is not", "= time.time() print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1", "s2Corpus in s2CorpusList: print(f'Processing: {s2Corpus}') s2CorpusUrl = fr\"C:\\Users\\22917746\\Desktop\\Semantic Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl)", "(te - ts))) return result return timed @timeit def unzipS2Contents(url): f = gzip.open(url,", "json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have errors if there isn't a", "for docMatched in docKeyTermMatched: 
outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms", "distributing massive amounts to each core batchSize = 10000 chunks = (len(fileContentsList)-1) //", "dumps if input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern = \"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\"", "for i in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount}", "{docJSON[\"title\"]}') return docJSON else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest", "= unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing files rather than distributing", "outputFileKeyTermMatched.writelines(f'{docMatched}\\n') outputFileKeyTermMatched.close() finish1 = time.time() print(f'Time to extract keyterms {finish1-start1:0.1f}') batchCount += 1", "too general when it matches by itself docJSON = json.loads(json.dumps(file)) # dumps if", "= 10000 chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount = 1 for", "docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\", \"utf-8\") for docMatched in docKeyTermMatched:", "(method.__name__, (te - ts))) return result return timed @timeit def unzipS2Contents(url): f =", "{docJSON[\"id\"]}' except Exception as e: # 
print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file)", "to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time() docKeyTermMatched", "batchSize = 10000 chunks = (len(fileContentsList)-1) // batchSize + 1 batchCount = 1", "it matches by itself docJSON = json.loads(json.dumps(file)) # dumps if input is dict...", "documents from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile = manifestFile.read() manifestFileList =", "def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have", "\"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else:", "specified. 
return docJSON # return f'Year: {docJSON[\"year\"]} - Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}'", "not None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: # #", "as e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file) if", "# # resultCount += 1 # finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\",", "result is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\",", "docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON else: pass @timeit def", "list of .gz documents from S2 manifest manifestFile = open(fileLocation, \"r\") manifestFile =", "files rather than distributing massive amounts to each core batchSize = 10000 chunks", "unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return", "to disk {finishWriteToDisk-start:0.1f}') docListJSON = [] with concurrent.futures.ProcessPoolExecutor() as executor: results = executor.map(convertToJson,", "Exception as e: # print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList", "timeit(method): def timed(*args, **kw): ts = time.time() result = method(*args, **kw) te =", "i in range(chunks): start = time.time() batch = fileContentsList[i*batchSize:(i+1)*batchSize] print(f'\\nProcessing batch {batchCount} -", "import tqdm import gzip import json from langdetect import detect import time from", "rather than distributing massive amounts to each core batchSize = 10000 chunks =", "time.time() result = method(*args, **kw) te = time.time() print('%r %2.2f seconds' % (method.__name__,", "else: # # print(f'{resultCount} - Non-English Title') # pass # # 
resultCount +=", "# English Sieving # docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor: #", "is not None: # print(result) docKeyTermMatched.append(result) s2CorpusFname = s2Corpus.split('.')[0] outputFileKeyTermMatched = codecs.open(f\"englishDocsKeyTermMatched_{s2CorpusFname}_{batchCount}.txt\", \"w\",", "import gzip import json from langdetect import detect import time from pprint import", "= time.time() print('%r %2.2f seconds' % (method.__name__, (te - ts))) return result return", "getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have errors", "manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList if 's2'", "check english {finishCheckEng-start:0.1f}') # print(f'Time to write to disk {finishWriteToDisk-start:0.1f}') docListJSON = []", "\"\"\" from tqdm import tqdm import gzip import json from langdetect import detect", "{len(fileContentsList)}') # Batch processing files rather than distributing massive amounts to each core", "codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\") # for englishDocContent in docListEnglish: # outputFileEnglishDoc.writelines(f'{englishDocContent}\\n') # outputFileEnglishDoc.close() #", "'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList def getManifest(fileLocation): #", "manifestFile = manifestFile.read() manifestFileList = manifestFile.split('\\n') manifestFileList = [file for file in manifestFileList", "= 
\"(summarization|summarisation|text|nlg|nlp)|(automatic)[^.]*(generation|summary|summarization|summarisation)|(abstractive|extractive)[^.]*(model|summary|modelling|modeling|summarization|summarisation|processing)|(semantic)[^.]*(retrieval|graph|model|modelling|modeling|summarization|summarisation|processing|representations)|(natural|language)[^.]*(language|generation|processing)|(information)[^.]*(retrieval|graph|summary|summarization|summarisation)\" if (re.match(pattern, docJSON[\"title\"].lower())) or (re.match(pattern, docJSON[\"paperAbstract\"].lower())): print(f'DOC MATCHED: {docJSON[\"title\"]}') return docJSON", "result return timed @timeit def unzipS2Contents(url): f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8')", "'summarization', 'nlg', 'extractive', 'summeries', 'summarizatio'] # spelling problems...; removed automatic as it's too", "# else: # # print(f'{resultCount} - Non-English Title') # pass # # resultCount", "if result is not None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result) #", "e: # print(f'ERROR: {e}') pass def getEnglishDoc(file): try: docJSON = json.loads(file) if detect(docJSON[\"title\"])", "# pass # # resultCount += 1 # finishCheckEng = time.time() # outputFileEnglishDoc", "print(f'Time to convert to JSON {finishJSONConvert-start:0.1f}') # Key Term Matching start1 = time.time()", "docListEnglish = [] # with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch)", "{result}') # docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English Title') # pass", "import time from pprint import pprint import concurrent.futures import codecs import re def", "= [] # with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch) #", "**kw): ts = time.time() result = method(*args, **kw) te = time.time() print('%r %2.2f", "automatic as it's too general when it matches by itself docJSON = 
json.loads(json.dumps(file))", "results = executor.map(getEnglishDoc, batch) # # resultCount = 1 # for result in", "docJSON = json.loads(file) return docJSON except Exception as e: # print(f'ERROR: {e}') pass", "# dumps if input is dict... # keyTermsMatched = set(docJSON[\"title\"].lower().split(' ')).intersection(set(keyTermsList)) pattern =", "docListJSON: #docListEnglish: result = matchDocWithKeyTerms(doc) if result is not None: # print(result) docKeyTermMatched.append(result)", "batch) for result in results: if result is not None: docListJSON.append(result) else: pass", "is not None: # # print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: #", "as executor: results = executor.map(convertToJson, batch) for result in results: if result is", "f = gzip.open(url, 'rb') file_content = f.read().decode('utf-8') f.close() fileContentsList = file_content.split('\\n') return fileContentsList", "docJSON else: pass @timeit def main(): s2CorpusList = getManifest(r\"data/sample/semantic scholar manifest 2020-03.txt\") for", "resultCount += 1 # finishCheckEng = time.time() # outputFileEnglishDoc = codecs.open(f\"englishDocs_{batchCount}.txt\", \"w\", \"utf-8\")", "# can have errors if there isn't a title specified. return docJSON #", "fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing files rather than", "te = time.time() print('%r %2.2f seconds' % (method.__name__, (te - ts))) return result", "print(f'ERROR: {e}') pass def matchDocWithKeyTerms(file): # print(file) # keyTermsList = ['summarisation', 'summarization', 'nlg',", "import json from langdetect import detect import time from pprint import pprint import", "have errors if there isn't a title specified. 
return docJSON # return f'Year:", "Scholar EDA\\data\\raw\\{s2Corpus}\" fileContentsList = unzipS2Contents(s2CorpusUrl) print(f'Number of files: {len(fileContentsList)}') # Batch processing files", "manifestFileList if 's2' in file] return manifestFileList def convertToJson(file): try: docJSON = json.loads(file)", "# with concurrent.futures.ProcessPoolExecutor() as executor: # results = executor.map(getEnglishDoc, batch) # # resultCount", "<NAME> \"\"\" from tqdm import tqdm import gzip import json from langdetect import", "itself docJSON = json.loads(json.dumps(file)) # dumps if input is dict... # keyTermsMatched =", "# print(f'{resultCount} - {result}') # docListEnglish.append(result) # else: # # print(f'{resultCount} - Non-English", "processing files rather than distributing massive amounts to each core batchSize = 10000", "Title: {docJSON[\"title\"]} - ID: {docJSON[\"id\"]}' except Exception as e: # print(f'ERROR: {e}') pass", "import concurrent.futures import codecs import re def timeit(method): def timed(*args, **kw): ts =", "general when it matches by itself docJSON = json.loads(json.dumps(file)) # dumps if input", "= json.loads(file) if detect(docJSON[\"title\"]) == 'en': # can have errors if there isn't" ]
[ "\"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff # \"link\": \"link\", \"image\":", "r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown \"\"\" # spoken", "Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\",", "# Sections and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\":", "ctx = Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and", "spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs", "mode: command and code.language: markdown \"\"\" # spoken name -> ultisnips snippet name", "= Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language:", "code.language: markdown \"\"\" # spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = {", "\"\"\" # spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections", "\"sub paragraph\": \"spar\", # Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\":", "block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\", \"footnote\": \"fnt\", \"detail\": \"detail\", }", "\"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff # \"link\": \"link\", \"image\": \"img\",", "Sections and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\",", "-> ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs # \"section\":", "# \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\",", "from talon import Context ctx = Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode:", "snippet name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs # \"section\": \"sec\", \"sub", "# Common stuff # \"link\": \"link\", \"image\": \"img\", 
\"inline code\": \"ilc\", \"code block\":", "tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown \"\"\" # spoken name", "# \"link\": \"link\", \"image\": \"img\", \"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell block\":", "code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\", \"footnote\": \"fnt\",", "paragraph\": \"spar\", # Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\",", "\"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff # \"link\":", "\"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\",", "and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\":", "\"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common", "# \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub", "= r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown \"\"\" #", "mode: user.markdown mode: command and code.language: markdown \"\"\" # spoken name -> ultisnips", "talon import Context ctx = Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown", "# Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\":", "\"link\": \"link\", \"image\": \"img\", \"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\",", "import Context ctx = Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode:", "{ # Sections and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub", "\"comment\": \"/*\", # Common stuff # \"link\": \"link\", \"image\": \"img\", \"inline code\": \"ilc\",", "Common stuff # \"link\": \"link\", \"image\": \"img\", \"inline 
code\": \"ilc\", \"code block\": \"cbl\",", "\"image\": \"img\", \"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\":", "\"par\", \"sub paragraph\": \"spar\", # Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold", "\"spar\", # Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike", "\"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\", \"footnote\": \"fnt\", \"detail\":", "Context ctx = Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command", "section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text", "formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\":", "\"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff #", "stuff # \"link\": \"link\", \"image\": \"img\", \"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell", "through\": \"~~\", \"comment\": \"/*\", # Common stuff # \"link\": \"link\", \"image\": \"img\", \"inline", "\"/*\", # Common stuff # \"link\": \"link\", \"image\": \"img\", \"inline code\": \"ilc\", \"code", "markdown \"\"\" # spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = { #", "\"~~\", \"comment\": \"/*\", # Common stuff # \"link\": \"link\", \"image\": \"img\", \"inline code\":", "user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown \"\"\" # spoken name ->", "and code.language: markdown \"\"\" # spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] =", "section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text formatting # \"italics\": \"*\",", "user.markdown mode: command and code.language: markdown \"\"\" # spoken name -> ultisnips snippet", "sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", 
# Text formatting # \"italics\":", "name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs #", "\"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", #", "\"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", #", "ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs # \"section\": \"sec\",", "italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff # \"link\": \"link\",", "\"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\", \"footnote\":", "ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown \"\"\"", "\"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text formatting", "\"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text formatting # \"italics\": \"*\", \"bold\":", "\"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\", \"footnote\": \"fnt\", \"detail\": \"detail\",", "= { # Sections and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\", \"sub", "\"section\": \"sec\", \"sub section\": \"ssec\", \"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\":", "\"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text formatting # \"italics\": \"*\", \"bold\": \"**\",", "\"link\", \"image\": \"img\", \"inline code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference", "\"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\", \"comment\": \"/*\", # Common stuff", "ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs # \"section\": \"sec\", \"sub section\": \"ssec\",", "# spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"] = { # Sections and", "\"img\", \"inline 
code\": \"ilc\", \"code block\": \"cbl\", \"shell block\": \"shellcbl\", \"reference link\": \"refl\",", "command and code.language: markdown \"\"\" # spoken name -> ultisnips snippet name ctx.lists[\"user.snippets\"]", "Text formatting # \"italics\": \"*\", \"bold\": \"**\", \"bold italics\": \"***\", \"strike through\": \"~~\",", "\"sub sub section\": \"sssec\", \"paragraph\": \"par\", \"sub paragraph\": \"spar\", # Text formatting #", "Context() ctx.matches = r\"\"\" tag: user.vim_ultisnips mode: user.markdown mode: command and code.language: markdown", "name ctx.lists[\"user.snippets\"] = { # Sections and Paragraphs # \"section\": \"sec\", \"sub section\":" ]
[ "= np.array( [get_image(files, width, height, mode) for files in images]).astype(np.float32) # Make sure", "image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch = np.array(", "[get_image(files, width, height, mode) for files in images]).astype(np.float32) # Make sure the images", "images.min())).astype(np.uint8) # arrange images in square images_in_square = np.reshape( images[:size * size], (size,", "4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) # combine images", "mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im print(new_im, images.shape[1] *", "images]).astype(np.float32) # Make sure the images are in 4 dimensions if len(data_batch.shape) <", "= Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im print(new_im,", "of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color images", "size of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color", "data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode): # size of", "color images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) #", "np.array( [get_image(files, width, height, mode) for files in images]).astype(np.float32) # Make sure the", "images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1]", "- images.min())).astype(np.uint8) # arrange images in square images_in_square = np.reshape( images[:size * size],", "height, mode) for files in images]).astype(np.float32) # Make sure the images are in", "image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i *", "enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * 
images.shape[1], image_i * images.shape[2])) return", "im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im", "= math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color images = (((images -", "images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square", "4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode): # size", "* size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square =", "mode): \"\"\" Read image :param image_path: path of image :param width: width of", "Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im print(new_im, images.shape[1]", "- images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # arrange images in square", ":return Image data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height,", "for col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image,", "Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) # combine images for col_i, col_images", "PIL import Image import math def get_image(image_path, width, height, mode): \"\"\" Read image", "images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size))", "of image :param width: width of image :param height: height of image :param", "height, mode): data_batch = np.array( [get_image(files, width, height, mode) for files in images]).astype(np.float32)", "as np from PIL import Image import math def get_image(image_path, width, height, mode):", "(((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # arrange images in", "\"\"\" Read image :param image_path: path of 
image :param width: width of image", "np from PIL import Image import math def get_image(image_path, width, height, mode): \"\"\"", "new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) # combine images for", "np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L':", "square images_in_square = np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if", "= np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode ==", "in images]).astype(np.float32) # Make sure the images are in 4 dimensions if len(data_batch.shape)", "image :param mode: image mode :return Image data \"\"\" image = Image.open(image_path) return", "images_to_grid(images, mode): # size of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to", "(1,)) return data_batch def images_to_grid(images, mode): # size of grid size = math.floor(np.sqrt(images.shape[0]))", "col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im,", "height: height of image :param mode: image mode :return Image data \"\"\" image", ":param image_path: path of image :param width: width of image :param height: height", "arrange images in square images_in_square = np.reshape( images[:size * size], (size, size, images.shape[1],", "def get_batch(images, width, height, mode): data_batch = np.array( [get_image(files, width, height, mode) for", "(images.max() - images.min())).astype(np.uint8) # arrange images in square images_in_square = np.reshape( images[:size *", "0 to 255 rgb color images = (((images - images.min()) * 255) /", "* size, images.shape[2] * size)) # combine images for col_i, col_images in enumerate(images_in_square):", "if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images,", "# Make sure 
the images are in 4 dimensions if len(data_batch.shape) < 4:", "images for col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im =", "+ (1,)) return data_batch def images_to_grid(images, mode): # size of grid size =", "width, height, mode) for files in images]).astype(np.float32) # Make sure the images are", "images in square images_in_square = np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2],", "mode == 'L': images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size,", "Read image :param image_path: path of image :param width: width of image :param", "enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i *", "mode): # size of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255", "image :param image_path: path of image :param width: width of image :param height:", "are in 4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,))", "image mode :return Image data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images,", "images.shape[2] * size)) # combine images for col_i, col_images in enumerate(images_in_square): for image_i,", "= (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # arrange images", "'L': images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] *", "Image import math def get_image(image_path, width, height, mode): \"\"\" Read image :param image_path:", "data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch", "get_batch(images, width, height, mode): data_batch = np.array( [get_image(files, width, height, mode) for files", "images.shape[2], images.shape[3])) if mode == 'L': images_in_square = 
np.squeeze(images_in_square, 4) new_im = Image.new(mode,", "of image :param mode: image mode :return Image data \"\"\" image = Image.open(image_path)", "mode: image mode :return Image data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def", "image :param width: width of image :param height: height of image :param mode:", "4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch", "images are in 4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape +", "in 4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return", "= np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) #", "255 rgb color images = (((images - images.min()) * 255) / (images.max() -", "mode) for files in images]).astype(np.float32) # Make sure the images are in 4", "for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1],", "for files in images]).astype(np.float32) # Make sure the images are in 4 dimensions", "255) / (images.max() - images.min())).astype(np.uint8) # arrange images in square images_in_square = np.reshape(", "height of image :param mode: image mode :return Image data \"\"\" image =", "* size)) # combine images for col_i, col_images in enumerate(images_in_square): for image_i, image", "\"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch =", "width, height, mode): \"\"\" Read image :param image_path: path of image :param width:", "to 255 rgb color images = (((images - images.min()) * 255) / (images.max()", "# combine images for col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images):", "size)) # combine images for col_i, col_images in enumerate(images_in_square): for image_i, image 
in", "mode :return Image data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width,", "Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch = np.array( [get_image(files, width,", "dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def", "in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))", "# scale 0 to 255 rgb color images = (((images - images.min()) *", "in enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i", "# arrange images in square images_in_square = np.reshape( images[:size * size], (size, size,", "size, images.shape[2] * size)) # combine images for col_i, col_images in enumerate(images_in_square): for", "height, mode): \"\"\" Read image :param image_path: path of image :param width: width", "< 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode): #", "path of image :param width: width of image :param height: height of image", "np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch = np.array( [get_image(files, width, height, mode)", "size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square, 4) new_im", "col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im = Image.fromarray(image, mode)", "<filename>helper.py<gh_stars>0 import numpy as np from PIL import Image import math def get_image(image_path,", "= Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) # combine images for col_i,", "from PIL import Image import math def get_image(image_path, width, height, mode): \"\"\" Read", "Image data \"\"\" image = 
Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode):", "get_image(image_path, width, height, mode): \"\"\" Read image :param image_path: path of image :param", "* 255) / (images.max() - images.min())).astype(np.uint8) # arrange images in square images_in_square =", "scale 0 to 255 rgb color images = (((images - images.min()) * 255)", "np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2] * size)) # combine", "mode): data_batch = np.array( [get_image(files, width, height, mode) for files in images]).astype(np.float32) #", "/ (images.max() - images.min())).astype(np.uint8) # arrange images in square images_in_square = np.reshape( images[:size", "== 'L': images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] * size, images.shape[2]", "the images are in 4 dimensions if len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape", "grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color images =", "# size of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb", "Make sure the images are in 4 dimensions if len(data_batch.shape) < 4: data_batch", "combine images for col_i, col_images in enumerate(images_in_square): for image_i, image in enumerate(col_images): im", "image :param height: height of image :param mode: image mode :return Image data", "files in images]).astype(np.float32) # Make sure the images are in 4 dimensions if", "= data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode): # size of grid", "new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2])) return new_im print(new_im, images.shape[1] * size)", "images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square, 4) new_im =", ":param width: width of image :param height: height of image :param mode: image", "width: 
width of image :param height: height of image :param mode: image mode", "len(data_batch.shape) < 4: data_batch = data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode):", ":param mode: image mode :return Image data \"\"\" image = Image.open(image_path) return np.array(image.convert(mode))", "math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color images = (((images - images.min())", "math def get_image(image_path, width, height, mode): \"\"\" Read image :param image_path: path of", "rgb color images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)", "sure the images are in 4 dimensions if len(data_batch.shape) < 4: data_batch =", "images_in_square = np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode", "import numpy as np from PIL import Image import math def get_image(image_path, width,", "of image :param height: height of image :param mode: image mode :return Image", "size = math.floor(np.sqrt(images.shape[0])) # scale 0 to 255 rgb color images = (((images", "= Image.open(image_path) return np.array(image.convert(mode)) def get_batch(images, width, height, mode): data_batch = np.array( [get_image(files,", "images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # arrange images in square images_in_square", "data_batch = np.array( [get_image(files, width, height, mode) for files in images]).astype(np.float32) # Make", "size], (size, size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square,", "if mode == 'L': images_in_square = np.squeeze(images_in_square, 4) new_im = Image.new(mode, (images.shape[1] *", "numpy as np from PIL import Image import math def get_image(image_path, width, height,", "width of image :param height: height of image :param mode: image mode :return", "return np.array(image.convert(mode)) def get_batch(images, width, height, 
mode): data_batch = np.array( [get_image(files, width, height,", "def get_image(image_path, width, height, mode): \"\"\" Read image :param image_path: path of image", "return data_batch def images_to_grid(images, mode): # size of grid size = math.floor(np.sqrt(images.shape[0])) #", "in square images_in_square = np.reshape( images[:size * size], (size, size, images.shape[1], images.shape[2], images.shape[3]))", "(size, size, images.shape[1], images.shape[2], images.shape[3])) if mode == 'L': images_in_square = np.squeeze(images_in_square, 4)", "data_batch.reshape(data_batch.shape + (1,)) return data_batch def images_to_grid(images, mode): # size of grid size", "import Image import math def get_image(image_path, width, height, mode): \"\"\" Read image :param", "data_batch def images_to_grid(images, mode): # size of grid size = math.floor(np.sqrt(images.shape[0])) # scale", "def images_to_grid(images, mode): # size of grid size = math.floor(np.sqrt(images.shape[0])) # scale 0", "(images.shape[1] * size, images.shape[2] * size)) # combine images for col_i, col_images in", "width, height, mode): data_batch = np.array( [get_image(files, width, height, mode) for files in", "image_path: path of image :param width: width of image :param height: height of", "image_i, image in enumerate(col_images): im = Image.fromarray(image, mode) new_im.paste(im, (col_i * images.shape[1], image_i", "images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8) # arrange", "import math def get_image(image_path, width, height, mode): \"\"\" Read image :param image_path: path", ":param height: height of image :param mode: image mode :return Image data \"\"\"" ]
[ "new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830,", "rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces>", "conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60,", "\"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out =", "= new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces>", "import ipdb from ncclient import manager from getpass import getpass from ncclient.xml_ import", "= conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\"", "ncclient import manager from getpass import getpass from ncclient.xml_ import new_ele conn =", "is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config = conn.get_config(source=\"running\") config_xml", "port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\"", "host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc", "ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, 
allow_agent=False,", "from getpass import getpass from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\",", "from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False,", "manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace()", "<reponame>ksannedhi/pyplus_course import ipdb from ncclient import manager from getpass import getpass from ncclient.xml_", "= manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, )", "It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config = conn.get_config(source=\"running\")", ") ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\">", "an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config = conn.get_config(source=\"running\") config_xml =", "device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out", "import manager from getpass import getpass from ncclient.xml_ import new_ele conn = manager.connect(", "</configuration> </filter> \"\"\" # It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text)", 
"nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter>", "username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc =", "ipdb from ncclient import manager from getpass import getpass from ncclient.xml_ import new_ele", "getpass from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"},", "getpass import getpass from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(),", "look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter =", "\"\"\" # It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config", "import getpass from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\":", "allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter", "timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\" <filter", "<configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # It is an XML like thing", "<filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # It is an XML", "<interfaces> </interfaces> </configuration> </filter> \"\"\" # It is an XML like thing print(nc_out.tostring.decode())", "type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # 
It is an XML like", "</interfaces> </configuration> </filter> \"\"\" # It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\"))", "XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config = conn.get_config(source=\"running\") config_xml = config.data_xml", "import new_ele conn = manager.connect( host=\"srx2.lasthop.io\", username=\"pyclass\", password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False,", "filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # It", "password=getpass(), device_params={\"name\": \"junos\"}, hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\")", "hostkey_verify=False, allow_agent=False, look_for_keys=False, port=830, timeout=60, ) ipdb.set_trace() rpc = new_ele(\"get-software-information\") nc_out = conn.rpc(rpc)", "from ncclient import manager from getpass import getpass from ncclient.xml_ import new_ele conn", "= \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # It is", "</filter> \"\"\" # It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text)", "# It is an XML like thing print(nc_out.tostring.decode()) print(nc_out.find(\".//product-name\")) print(nc_out.find(\".//product-name\").text) print(nc_out.find(\".//product-model\").text) config =", "new_ele(\"get-software-information\") nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration>", "ipdb.set_trace() rpc = new_ele(\"get-software-information\") 
nc_out = conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration>", "\"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" # It is an", "conn.rpc(rpc) filter = \"\"\" <filter type=\"subtree\"> <configuration> <interfaces> </interfaces> </configuration> </filter> \"\"\" #", "manager from getpass import getpass from ncclient.xml_ import new_ele conn = manager.connect( host=\"srx2.lasthop.io\"," ]
[ "from django.db import models from community.models import Community from root import settings \"\"\"", "Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False,", "\"\"\" Subscription object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False,", "from root import settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model): user =", "class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT,", "from community.models import Community from root import settings \"\"\" Subscription object model \"\"\"", "models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return", "Community from root import settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model): user", "\"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community,", "on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on =", "on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id)", "settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, 
on_delete=models.PROTECT, blank=False,", "user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False,", "= models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class Meta: verbose_name_plural = \"subscriptions\"", "django.db import models from community.models import Community from root import settings \"\"\" Subscription", "root import settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL,", "community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def", "import models from community.models import Community from root import settings \"\"\" Subscription object", "models from community.models import Community from root import settings \"\"\" Subscription object model", "community.models import Community from root import settings \"\"\" Subscription object model \"\"\" class", "null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False,", "blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True,", "object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community", "Subscription object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True)", 
"models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on", "blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class", "null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class Meta:", "model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community =", "db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class Meta: verbose_name_plural", "db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False)", "= models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT, blank=False, null=False, db_index=True) community = models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True)", "= models.ForeignKey(Community, on_delete=models.PROTECT, blank=False, null=False, db_index=True) created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self):", "import Community from root import settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model):", "created_on = models.DateTimeField(auto_now_add=True, blank=False, null=False) def __str__(self): return str(self.id) class Meta: verbose_name_plural =", "import settings \"\"\" Subscription object model \"\"\" class Subscription(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.PROTECT," ]
[ "1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init", "self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy =", "+ \" speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if", "2 # Left Trigger self.scale_linear = 1 self.axis_front_steering = 0 # Right left-right", "False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy", "new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0", "self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed", "self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def", "FourWheelSteering() new_cmd = False for button in self.enable_button: if data.buttons[button]: new_cmd = True", "0 # Right left-right stick self.axis_rear_steering = 3 # Left left-right stick self.scale_steering", "= 5 # Right Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear =", "queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False for button", "if data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes))", "if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > 
self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle =", "four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle =", "= pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init =", "= 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed -", "> self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False", "0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0", "%s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone:", "True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed -", "Trigger self.scale_linear = 1 self.axis_front_steering = 0 # Right left-right stick self.axis_rear_steering =", "= False for button in self.enable_button: if data.buttons[button]: new_cmd = True if new_cmd:", "> self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else:", "if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward = 1.0 
linear_reverse =", "self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear", "self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id()", "0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin()", "False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0", "stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False", "four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone =", "Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg =", "self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) >", "= speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0", "= data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg", "= 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering 
else: four_wheel_steering_msg.rear_steering_angle = 0.0", "True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse", "data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed =", "Right Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear = 1 self.axis_front_steering =", "= 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10)", "# Right left-right stick self.axis_rear_steering = 3 # Left left-right stick self.scale_steering =", "= data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed", "self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True", "data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init", "else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed", "+ linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed", "= 2 # Left Trigger self.scale_linear = 1 self.axis_front_steering = 0 # Right", "self.axis_front_steering = 0 # Right left-right stick self.axis_rear_steering = 3 # Left left-right", "# Right Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear = 1 self.axis_front_steering", 
"callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False for button in self.enable_button: if", "= True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed", "(-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed) > self.axis_dead_zone:", "self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif", "= True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward = 1.0", "- data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed =", "self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle", "data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else:", "#!/usr/bin/env python import rospy from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from", "self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub", "self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\",", "data): four_wheel_steering_msg = 
FourWheelSteering() new_cmd = False for button in self.enable_button: if data.buttons[button]:", "0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse]", "True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \"", "linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed", "= 3 # Left left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg", "else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg == False:", "= 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg", "0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data):", "stick self.axis_rear_steering = 3 # Left left-right stick self.scale_steering = pi/10.0 self.enable_button =", "== False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try:", "linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else:", "0.05 self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse = 2 # Left Trigger", "Left Trigger self.scale_linear = 1 self.axis_front_steering = 0 # Right left-right stick self.axis_rear_steering", "button in self.enable_button: if 
data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \"", "self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle", "FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward", "+ \" axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init:", "math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy():", "self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd", "for button in self.enable_button: if data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() +", "speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed)", "FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False for", "rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False for button in", "#rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if", "__init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse = 2", "self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\",", "Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear = 1 self.axis_front_steering 
= 0", "self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True", "in self.enable_button: if data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\"", "= [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed =", "data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg ==", "import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05", "self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy = TeleopFourWheelSteeringJoy()", "= 0 # Right left-right stick self.axis_rear_steering = 3 # Left left-right stick", "False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering',", "from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward =", "\" axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward", "> 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse =", "False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback)", "left-right stick self.axis_rear_steering = 3 # Left left-right stick self.scale_steering = pi/10.0 self.enable_button", "self.axis_rear_steering 
= 3 # Left left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5]", "four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg)", "data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if", "self.sent_disable_msg = False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if", "four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle =", "class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 # Right Trigger", "= data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed)", "> self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone:", "self.axis_linear_reverse = 2 # Left Trigger self.scale_linear = 1 self.axis_front_steering = 0 #", "3 # Left left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg =", "pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self):", "str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif", "= 1.0 if self.is_trigger_forward_init: linear_forward = 
data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01:", "pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False", "abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if", "speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) >", "= True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() +", "sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5", "data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse", "else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle", "#rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear", "= False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed =", "def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False for button in self.enable_button:", "= False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub =", "1 self.axis_front_steering = 0 # Right left-right stick 
self.axis_rear_steering = 3 # Left", "speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle", "linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed =", "= data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed", "self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed", "= data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01:", "elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse]", "if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy = TeleopFourWheelSteeringJoy() except rospy.ROSInterruptException: pass", "= False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy,", "rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd = False", "four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle =", "self.axis_dead_zone = 0.05 self.axis_linear_forward = 
5 # Right Trigger self.axis_linear_reverse = 2 #", "= 0.05 self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse = 2 # Left", "left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init =", "abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) >", "+ str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward]", "\" speed %s\", speed) if abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering])", "if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg", "four_wheel_steering_msg = FourWheelSteering() new_cmd = False for button in self.enable_button: if data.buttons[button]: new_cmd", "if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy',", "if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init =", "TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse", "<reponame>cagrikilic/simulation-environment<gh_stars>1-10 #!/usr/bin/env python import rospy from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering", 
"from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class", "import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def", "= FourWheelSteering() new_cmd = False for button in self.enable_button: if data.buttons[button]: new_cmd =", "self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else:", "= rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering() new_cmd =", "Right left-right stick self.axis_rear_steering = 3 # Left left-right stick self.scale_steering = pi/10.0", "four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if", "= 1 self.axis_front_steering = 0 # Right left-right stick self.axis_rear_steering = 3 #", "Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 # Right", "python import rospy from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg", "# Left left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False", "Left left-right stick self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init", "self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True", "0.0 
self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg =", "def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 # Right Trigger self.axis_linear_reverse =", "if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init =", "self.scale_steering = pi/10.0 self.enable_button = [4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init", "= False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__", "False for button in self.enable_button: if data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id()", "linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else:", "abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg =", "rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg", "import rospy from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import", "[4,5] self.sent_disable_msg = False self.is_trigger_forward_init = False self.is_trigger_reverse_init = False self.last_forward_speed = 0.0", "axes\" + str(data.axes)) linear_forward = 1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward =", 
"self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False)", "new_cmd = False for button in self.enable_button: if data.buttons[button]: new_cmd = True if", "5 # Right Trigger self.axis_linear_reverse = 2 # Left Trigger self.scale_linear = 1", "1.0 linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward])", "linear_reverse = 1.0 if self.is_trigger_forward_init: linear_forward = data.axes[self.axis_linear_forward] elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) >", "# Left Trigger self.scale_linear = 1 self.axis_front_steering = 0 # Right left-right stick", "if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering])", "else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init: linear_reverse = data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse])", "= (-linear_forward + linear_reverse)/2.0 #rospy.loginfo(rospy.get_caller_id() + \" speed %s\", speed) if abs(speed) >", "= True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy = TeleopFourWheelSteeringJoy() except", "- data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward] if self.is_trigger_reverse_init:", "new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward =", "else: if self.sent_disable_msg == False: 
self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ == '__main__':", "> 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward +", "data.axes[self.axis_linear_reverse] elif abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed =", "rospy from math import pi from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy", "from four_wheel_steering_msgs.msg import FourWheelSteering from sensor_msgs.msg import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone", "0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering else: four_wheel_steering_msg.rear_steering_angle = 0.0 self.pub.publish(four_wheel_steering_msg)", "True if __name__ == '__main__': rospy.init_node('teleop_four_wheel_steering_joy', anonymous=False) try: teleop_four_wheel_steering_joy = TeleopFourWheelSteeringJoy() except rospy.ROSInterruptException:", "self.scale_linear = 1 self.axis_front_steering = 0 # Right left-right stick self.axis_rear_steering = 3", "= 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self,", "abs(speed) > self.axis_dead_zone: four_wheel_steering_msg.speed = speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering", "self.enable_button: if data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" +", "False else: if self.sent_disable_msg == False: self.pub.publish(four_wheel_steering_msg) self.sent_disable_msg = True if __name__ 
==", "speed*self.scale_linear if abs(data.axes[self.axis_front_steering]) > self.axis_dead_zone: four_wheel_steering_msg.front_steering_angle = data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if", "self.last_forward_speed = 0.0 self.last_reverse_speed = 0.0 rospy.Subscriber(\"joy\", Joy, self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering,", "abs(self.last_reverse_speed - data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed", "= data.axes[self.axis_front_steering]*self.scale_steering else: four_wheel_steering_msg.front_steering_angle = 0.0 if abs(data.axes[self.axis_rear_steering]) > self.axis_dead_zone: four_wheel_steering_msg.rear_steering_angle = data.axes[self.axis_rear_steering]*self.scale_steering", "self.callback) self.pub = rospy.Publisher('cmd_four_wheel_steering', FourWheelSteering, queue_size=10) rospy.spin() def callback(self, data): four_wheel_steering_msg = FourWheelSteering()", "elif abs(self.last_forward_speed - data.axes[self.axis_linear_forward]) > 0.01: self.is_trigger_forward_init = True else: self.last_forward_speed = data.axes[self.axis_linear_forward]", "import Joy class TeleopFourWheelSteeringJoy(): def __init__(self): self.axis_dead_zone = 0.05 self.axis_linear_forward = 5 #", "data.buttons[button]: new_cmd = True if new_cmd: #rospy.loginfo(rospy.get_caller_id() + \" axes\" + str(data.axes)) linear_forward", "data.axes[self.axis_linear_reverse]) > 0.01: self.is_trigger_reverse_init = True else: self.last_reverse_speed = data.axes[self.axis_linear_reverse] speed = (-linear_forward" ]
[ "StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model", "UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model = StudentVillage fields = \"__all__\"", "serializers from SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers =", "from rest_framework import serializers from SIFUser.serializer import UserSerializer from .models import StudentVillage class", "import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms =", "SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms", "UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True,", "from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True)", ".models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class", "class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model =", "managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model = StudentVillage 
fields", "StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model = StudentVillage", "= UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta: model = StudentVillage fields =", "import serializers from SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers", "rest_framework import serializers from SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer):", "import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True) dormrooms = serializers.PrimaryKeyRelatedField(many=True, read_only=True) class Meta:", "from SIFUser.serializer import UserSerializer from .models import StudentVillage class StudentVillageSerializer(serializers.ModelSerializer): managers = UserSerializer(many=True)" ]
[ "False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b", "cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities')", "vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages')", "level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' )", "parser.add_argument('-language', action='store_true', help='list all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if", "os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a,", "%(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli')", "help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args = parser.parse_args()", "- %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat", "kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category')", "logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S'", "vulncat import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if", "the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom: 
vulncat.scrape_filters('kingdom') if args.language:", "import vulncat import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory", "logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it does", "exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s -", "datefmt='%a, %d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h',", "import argparse import vulncat import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the", "# create the log directory if it does not exist if os.path.exists(logpath) ==", "%Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper')", "argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the", "help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list", "all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true',", "parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args", "action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args =", "os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it does not exist", "%H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') 
#parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category',", "help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language',", "does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s -", "os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y", "not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s", "filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser", "it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s", "the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list", "action='store_true', help='list all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom:", "import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it does not", "create the log directory if it does not exist if os.path.exists(logpath) == False:", "if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s',", "== False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d", "%(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser = 
argparse.ArgumentParser(description='Vulncat web", ") parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true',", "the log directory if it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath)", "format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser =", "log directory if it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig(", "- %(message)s', datefmt='%a, %d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser", "#parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom',", "directory if it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel,", "helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all", "%d %b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\",", "web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories", "help='list all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom: vulncat.scrape_filters('kingdom')", "parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of", "all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the languages') args = parser.parse_args() if", "parser.add_argument('-category', action='store_true', help='list 
all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the", "import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it", "of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all the", "categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms') parser.add_argument('-language', action='store_true', help='list all", "%b %Y %H:%M:%S' ) parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli", "loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log directory if it does not exist if", "logpath=f'{os.getcwd()}/log' # create the log directory if it does not exist if os.path.exists(logpath)", "argparse import vulncat import logging import os loglevel='DEBUG' logpath=f'{os.getcwd()}/log' # create the log", "action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true', help='list all the kingdoms')", "= argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all", "all the languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom: vulncat.scrape_filters('kingdom') if", "parser = argparse.ArgumentParser(description='Vulncat web parser cli') #parser.add_argument('-h', \"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list", "\"--help\", help='cli helper') parser.add_argument('-category', action='store_true', help='list all the categories of vulnerabilities') parser.add_argument('-kingdom', action='store_true',", "the kingdoms') parser.add_argument('-language', 
action='store_true', help='list all the languages') args = parser.parse_args() if args.category:", "languages') args = parser.parse_args() if args.category: vulncat.scrape_filters('category') if args.kingdom: vulncat.scrape_filters('kingdom') if args.language: vulncat.scrape_filters('codelang')", "if it does not exist if os.path.exists(logpath) == False: os.mkdir(logpath) logging.basicConfig( level=loglevel, filename=f'{logpath}/app.log'," ]
[ "self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f) for f in", "#%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%%", "to see how many bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins", "data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat)", "Each 2d plane holds two cols #specific luminosity (erg/s/angstrom) and number of photons", "np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold all the data; Each 2d", "plane holds two cols #specific luminosity (erg/s/angstrom) and number of photons of that", "of that wavelength #rows are labeled by wavelength. #Each plane corresponds to a", "2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which is in days", "enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%%", "the data; Each 2d plane holds two cols #specific luminosity (erg/s/angstrom) and number", "numBins = arr.shape[0] #3D array will hold all the data; Each 2d plane", "= np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which is", "same for each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms,", "pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100])", "as np import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self,", "pandas as pd import numpy as np import os class pairSNE(lightCurve): def __init__(self,", "as pd import numpy as np import os class pairSNE(lightCurve): def __init__(self, name):", "len(fn) if numT == 0: self.rawDat = None self.tKey = None 
self.lambdaKey =", "return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f) for", "We will store the days and wavelengths once #since they are the same", "in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder", "os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot", "pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100]) print(\"name is {}\".format(pair.name))", "import lightCurve import pandas as pd import numpy as np import os class", "import numpy as np import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name)", "which is in days and correponds to planes #now load in the actual", "in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold all", "bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array", "we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will", "wavelength #rows are labeled by wavelength. #Each plane corresponds to a day. 
We", "dir): fn = [f for f in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn)", "the same for each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0]", "os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT == 0: self.rawDat = None", "each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have", "f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat =", "= [f for f in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT", "= holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\")", "numT = len(fn) if numT == 0: self.rawDat = None self.tKey = None", "for idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:]", "name): lightCurve.__init__(self, name) def loadData(self, dir): fn = [f for f in os.listdir(dir)", "pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100]) print(\"name is", "all the data; Each 2d plane holds two cols #specific luminosity (erg/s/angstrom) and", "fn = [os.path.join(dir,f) for f in fn] #lets read first file to see", "#angstroms, rows, #have tKey which is in days and correponds to planes #now", "wavelengths once #since they are the same for each day holder = np.zeros((numBins,", "holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"),", "store the days and wavelengths once #since they are the same for each", "def loadData(self, dir): fn = [f for f in os.listdir(dir) if f[-4:]=='spec'] numT", "holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir)", "and number of photons of that wavelength #rows are labeled by wavelength. 
#Each", "import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn", "in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT == 0: self.rawDat =", "usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(),", "f[-4:]=='spec'] numT = len(fn) if numT == 0: self.rawDat = None self.tKey =", "import pandas as pd import numpy as np import os class pairSNE(lightCurve): def", "np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which is in", "numT == 0: self.rawDat = None self.tKey = None self.lambdaKey = None return", "[os.path.join(dir,f) for f in fn] #lets read first file to see how many", "load in the actual data for idx, f in enumerate(fn): arr = np.loadtxt(f,", "self.rawDat = holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair =", "cols #specific luminosity (erg/s/angstrom) and number of photons of that wavelength #rows are", "number of photons of that wavelength #rows are labeled by wavelength. #Each plane", "days and correponds to planes #now load in the actual data for idx,", "and wavelengths once #since they are the same for each day holder =", "lightCurve import lightCurve import pandas as pd import numpy as np import os", "if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100]) print(\"name is {}\".format(pair.name)) # %%", "= None self.tKey = None self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:])", "def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn = [f for f", "to a day. 
We will store the days and wavelengths once #since they", "hold all the data; Each 2d plane holds two cols #specific luminosity (erg/s/angstrom)", "#lets read first file to see how many bins we have in spectra", "arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair", "self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which is in days and correponds", "= pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100]) print(\"name", "labeled by wavelength. #Each plane corresponds to a day. We will store the", "= np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold all the data; Each", "correponds to planes #now load in the actual data for idx, f in", "== 0: self.rawDat = None self.tKey = None self.lambdaKey = None return None", "plane corresponds to a day. We will store the days and wavelengths once", "if numT == 0: self.rawDat = None self.tKey = None self.lambdaKey = None", "read first file to see how many bins we have in spectra arr", "the days and wavelengths once #since they are the same for each day", "of photons of that wavelength #rows are labeled by wavelength. #Each plane corresponds", "= len(fn) if numT == 0: self.rawDat = None self.tKey = None self.lambdaKey", "pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn = [f for", "wavelength. #Each plane corresponds to a day. 
We will store the days and", "self.rawDat = None self.tKey = None self.lambdaKey = None return None self.tKey =", "arr.shape[0] #3D array will hold all the data; Each 2d plane holds two", "skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir =", "__init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn = [f for f in", "f in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT == 0: self.rawDat", "2d plane holds two cols #specific luminosity (erg/s/angstrom) and number of photons of", "in the actual data for idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1,", "holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which", "pd import numpy as np import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self,", "lightCurve import pandas as pd import numpy as np import os class pairSNE(lightCurve):", "np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir", "np import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir):", "f in fn] fn = [os.path.join(dir,f) for f in fn] #lets read first", "None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f)", "#specific luminosity (erg/s/angstrom) and number of photons of that wavelength #rows are labeled", "is in days and correponds to planes #now load in the actual data", "file to see how many bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4])", "that wavelength #rows are labeled by wavelength. 
#Each plane corresponds to a day.", "self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn", "to planes #now load in the actual data for idx, f in enumerate(fn):", "'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt", "None self.tKey = None self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:]) for", "are the same for each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey =", "numpy as np import os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def", "a day. We will store the days and wavelengths once #since they are", "many bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D", "#%% from lightCurve import lightCurve import pandas as pd import numpy as np", "arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold all the data;", "f in fn] #lets read first file to see how many bins we", "#have tKey which is in days and correponds to planes #now load in", "in fn] fn = [os.path.join(dir,f) for f in fn] #lets read first file", "\"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as", "for f in fn] #lets read first file to see how many bins", "spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold all the", "= [os.path.join(dir,f) for f in fn] #lets read first file to see how", "= os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__': print(pair.rawDat) import", "fn] fn = [os.path.join(dir,f) for f in fn] #lets read first file to", "#%% if __name__=='__main__': print(pair.rawDat) import matplotlib.pyplot as plt plt.plot(pair.rawDat[:,0,100]) print(\"name is {}\".format(pair.name)) 
#", "class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn = [f", "os class pairSNE(lightCurve): def __init__(self, name): lightCurve.__init__(self, name) def loadData(self, dir): fn =", "#3D array will hold all the data; Each 2d plane holds two cols", "day. We will store the days and wavelengths once #since they are the", "are labeled by wavelength. #Each plane corresponds to a day. We will store", "name) def loadData(self, dir): fn = [f for f in os.listdir(dir) if f[-4:]=='spec']", "#now load in the actual data for idx, f in enumerate(fn): arr =", "= [float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f) for f in fn]", "<filename>pair_sne.py #%% from lightCurve import lightCurve import pandas as pd import numpy as", "#rows are labeled by wavelength. #Each plane corresponds to a day. We will", "holds two cols #specific luminosity (erg/s/angstrom) and number of photons of that wavelength", "days and wavelengths once #since they are the same for each day holder", "once #since they are the same for each day holder = np.zeros((numBins, 2,", "numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey which is in days and", "for f in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT == 0:", "fn] #lets read first file to see how many bins we have in", "two cols #specific luminosity (erg/s/angstrom) and number of photons of that wavelength #rows", "day holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows, #have tKey", "and correponds to planes #now load in the actual data for idx, f", "lightCurve.__init__(self, name) def loadData(self, dir): fn = [f for f in os.listdir(dir) if", "arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if", "in days and correponds to planes #now load in the actual data for", "[float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f) for f in fn] #lets", "[f for f in 
os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if numT ==", "in fn] #lets read first file to see how many bins we have", "rows, #have tKey which is in days and correponds to planes #now load", "see how many bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins =", "how many bins we have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0]", "for each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey = arr[:,0] #angstroms, rows,", "the actual data for idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4])", "= arr[:,1:] self.rawDat = holder #%% if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200')", "0: self.rawDat = None self.tKey = None self.lambdaKey = None return None self.tKey", "None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn = [os.path.join(dir,f) for f", "data; Each 2d plane holds two cols #specific luminosity (erg/s/angstrom) and number of", "they are the same for each day holder = np.zeros((numBins, 2, numT)) self.lambdaKey", "fn = [f for f in os.listdir(dir) if f[-4:]=='spec'] numT = len(fn) if", "= arr[:,0] #angstroms, rows, #have tKey which is in days and correponds to", "have in spectra arr = np.loadtxt(fn[0],skiprows=1,usecols=[0,1,4]) numBins = arr.shape[0] #3D array will hold", "photons of that wavelength #rows are labeled by wavelength. 
#Each plane corresponds to", "= np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat = holder #%% if __name__=='__main__':", "for f in fn] fn = [os.path.join(dir,f) for f in fn] #lets read", "= None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn] fn =", "#since they are the same for each day holder = np.zeros((numBins, 2, numT))", "planes #now load in the actual data for idx, f in enumerate(fn): arr", "(erg/s/angstrom) and number of photons of that wavelength #rows are labeled by wavelength.", "if __name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if", "first file to see how many bins we have in spectra arr =", "tKey which is in days and correponds to planes #now load in the", "will store the days and wavelengths once #since they are the same for", "arr[:,0] #angstroms, rows, #have tKey which is in days and correponds to planes", "if f[-4:]=='spec'] numT = len(fn) if numT == 0: self.rawDat = None self.tKey", "None self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in fn]", "corresponds to a day. We will store the days and wavelengths once #since", "__name__=='__main__': data_dir = os.path.join(os.path.join(os.getcwd(), \"data\"), 'B200') pair = pairSNE(\"red_p\") pair.loadData(data_dir) #%% if __name__=='__main__':", "actual data for idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx]", "luminosity (erg/s/angstrom) and number of photons of that wavelength #rows are labeled by", "idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] = arr[:,1:] self.rawDat", "array will hold all the data; Each 2d plane holds two cols #specific", "will hold all the data; Each 2d plane holds two cols #specific luminosity", "by wavelength. #Each plane corresponds to a day. 
We will store the days", "data for idx, f in enumerate(fn): arr = np.loadtxt(f, skiprows=1, usecols=[0,1,4]) holder[:,:,idx] =", "self.tKey = None self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f", "= None self.lambdaKey = None return None self.tKey = [float(f.split(\"_\")[1][1:]) for f in", "= arr.shape[0] #3D array will hold all the data; Each 2d plane holds", "#Each plane corresponds to a day. We will store the days and wavelengths", "from lightCurve import lightCurve import pandas as pd import numpy as np import", "loadData(self, dir): fn = [f for f in os.listdir(dir) if f[-4:]=='spec'] numT =" ]
[ "a base64 PNG image of our SeedQR. \"\"\" # create a qrcode of", "version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv", "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return False", "seed phrases \"\"\" if is_online(): return render_template('panic.html') params = {} # generate a", "params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path']", "\"\"\" Main home page which generates random seed phrases \"\"\" if is_online(): return", "def seed_qr_base64(words): \"\"\" Return a base64 PNG image of our SeedQR. \"\"\" #", "bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main home page which generates random", "= seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main'])", "our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\" Return", "of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding of", "import random from io import BytesIO import socket from binascii import unhexlify, hexlify", "png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return", "generate a random seed phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy", "hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32 from", "value of our SeedQR. \"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words):", "embit library # https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39 from", "code creation import qrcode # Trusting Flask as simple web interface from flask", "Trusting Flask as simple web interface from flask import Flask, render_template, request app", "= BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate", "render_template('panic.html') params = {} # generate a random seed phrase params['entropy'] = random.randbytes(32)", "seed phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy'])", "try: s.connect(('1.1.1.1', 53)) return True except OSError: return False def seed_qr_string(words): \"\"\" Return", "Return the string value of our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in", "params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root =", "flask import Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online():", "words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG image of our SeedQR. \"\"\"", "im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words", "in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG image of our SeedQR.", "for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return", "\"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main home page", "entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words'])", "= wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are online Thanks @KeithMukai for", "except OSError: return False def seed_qr_string(words): \"\"\" Return the string value of our", "= bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv']", "import bip39 from embit import wordlists from embit import script from embit.networks import", "xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist)", "if we are online Thanks 
@KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s =", "import script from embit.networks import NETWORKS # Trusting qrcode library as offline qr", "import NETWORKS # Trusting qrcode library as offline qr code creation import qrcode", "words @app.route(\"/\") def home(): \"\"\" Main home page which generates random seed phrases", "from embit.networks import NETWORKS # Trusting qrcode library as offline qr code creation", "qr code creation import qrcode # Trusting Flask as simple web interface from", "are online Thanks @KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return False def seed_qr_string(words):", "s.connect(('1.1.1.1', 53)) return True except OSError: return False def seed_qr_string(words): \"\"\" Return the", "of our SeedQR. 
\"\"\" # create a qrcode of our seed_qr_string img =", "img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our png image", "is_online(): return render_template('panic.html') params = {} # generate a random seed phrase params['entropy']", "from embit import wordlists from embit import script from embit.networks import NETWORKS #", "import Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\"", "seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'],", "qrcode library as offline qr code creation import qrcode # Trusting Flask as", "from flask import Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def", "Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are online Thanks", "53)) return True except OSError: return False def seed_qr_string(words): \"\"\" Return the string", "request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we", "= Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are online", "of our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\"", "\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64", "bip39 from embit import wordlists from embit import script from embit.networks import NETWORKS", "bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv'])", "seed_qr_string(words): \"\"\" Return the string value of our SeedQR. \"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for", "https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError:", "SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39", "= base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words =", "BytesIO import socket from binascii import unhexlify, hexlify # Trusting SeedSigner's embit library", "\"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode()", "seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root", "import json import base64 import random from io import BytesIO import socket from", "from embit import bip32 from embit import bip39 from embit import wordlists from", "random from io import BytesIO import socket 
from binascii import unhexlify, hexlify #", "Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check", "creation import qrcode # Trusting Flask as simple web interface from flask import", "random seed phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words'] =", "root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params,", "bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path'])", "def seed_qr_string(words): \"\"\" Return the string value of our SeedQR. \"\"\" return ''.join([str(wordlist.index(w)).zfill(4)", "def home(): \"\"\" Main home page which generates random seed phrases \"\"\" if", "import wordlists from embit import script from embit.networks import NETWORKS # Trusting qrcode", "# Trusting Flask as simple web interface from flask import Flask, render_template, request", "return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG", "our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our", "our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue())", "simple web interface from flask import Flask, render_template, request app = Flask(__name__) wordlist", "create a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a", "generate a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO()", "= 
bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main home page which generates", "phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string']", "random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64']", "= hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] =", "base64 PNG image of our SeedQR. \"\"\" # create a qrcode of our", "#!/usr/bin/env python3 import json import base64 import random from io import BytesIO import", "= bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'],", "script from embit.networks import NETWORKS # Trusting qrcode library as offline qr code", "= random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words'])", "@app.route(\"/\") def home(): \"\"\" Main home page which generates random seed phrases \"\"\"", "import bip32 from embit import bip39 from embit import wordlists from embit import", "random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main", "Return a base64 PNG image of our SeedQR. 
\"\"\" # create a qrcode", "get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\"", "app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are", "words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main home page which", "# Trusting qrcode library as offline qr code creation import qrcode # Trusting", "base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\")", "wordlists from embit import script from embit.networks import NETWORKS # Trusting qrcode library", "w in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG image of our", "library as offline qr code creation import qrcode # Trusting Flask as simple", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return False def", "encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64", "= {} # generate a random seed phrase params['entropy'] = random.randbytes(32) # seedQR", "SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\" Return a", "network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub =", "socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return False def seed_qr_string(words): \"\"\"", "img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase", "from binascii import unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from", "# https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39 from embit import", "params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\",", "Thanks @KeithMukai for the suggestion! 
https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1',", "page which generates random seed phrases \"\"\" if is_online(): return render_template('panic.html') params =", "xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if __name__ == \"__main__\": app.run(debug=True)", "get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def", "= xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if", "embit import bip32 from embit import bip39 from embit import wordlists from embit", "python3 import json import base64 import random from io import BytesIO import socket", "# generate a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file =", "return False def seed_qr_string(words): \"\"\" Return the string value of our SeedQR. \"\"\"", "# generate a random seed phrase params['entropy'] = random.randbytes(32) # seedQR our our", "Trusting qrcode library as offline qr code creation import qrcode # Trusting Flask", "https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy):", "embit import wordlists from embit import script from embit.networks import NETWORKS # Trusting", "\"\"\" Check if we are online Thanks @KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\"", "PNG image of our SeedQR. 
\"\"\" # create a qrcode of our seed_qr_string", "bip32 from embit import bip39 from embit import wordlists from embit import script", "= qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our png image #", "our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] =", "for w in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG image of", "seed_qr_base64(words): \"\"\" Return a base64 PNG image of our SeedQR. \"\"\" # create", "\"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True except OSError: return", "qrcode # Trusting Flask as simple web interface from flask import Flask, render_template,", "seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words'])", "import socket from binascii import unhexlify, hexlify # Trusting SeedSigner's embit library #", "{} # generate a random seed phrase params['entropy'] = random.randbytes(32) # seedQR our", "base64 import random from io import BytesIO import socket from binascii import unhexlify,", "params = {} # generate a random seed phrase params['entropy'] = random.randbytes(32) #", "root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public()", "from embit import script from embit.networks import NETWORKS # Trusting qrcode library as", "random seed phrases \"\"\" if is_online(): return render_template('panic.html') params = {} # generate", "# create a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate", "\"\"\" Return the string value of our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w", "SeedQR. \"\"\" # create a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words))", "base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy)", "Main home page which generates random seed phrases \"\"\" if is_online(): return render_template('panic.html')", "hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version)", "= root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html',", "wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are online Thanks @KeithMukai", "''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def seed_qr_base64(words): \"\"\" Return a base64 PNG image", "our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed']", "import base64 import random from io import BytesIO import socket from binascii import", "Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32 from embit import", "NETWORKS # Trusting qrcode library as offline qr code creation import qrcode #", "xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if __name__", "image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode()", "if is_online(): return render_template('panic.html') params = {} # 
generate a random seed phrase", "unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32", "a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file,", "False def seed_qr_string(words): \"\"\" Return the string value of our SeedQR. \"\"\" return", "= seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version =", "OSError: return False def seed_qr_string(words): \"\"\" Return the string value of our SeedQR.", "= bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv =", "= get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] =", "as offline qr code creation import qrcode # Trusting Flask as simple web", "render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if", "Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\"", "a random seed phrase params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words']", "xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return", "https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39 from embit import wordlists", "the string value of our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()])", "interface from flask import Flask, render_template, request app = Flask(__name__) wordlist = wordlists.bip39.WORDLIST", "qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/", "Flask as simple web interface from flask import Flask, render_template, request app =", "phrases \"\"\" if is_online(): return render_template('panic.html') params = {} # generate a random", "return render_template('panic.html') params = {} # generate a random seed phrase params['entropy'] =", "home page which generates random seed phrases \"\"\" if is_online(): return render_template('panic.html') params", "qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding", "binascii import unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit", "is_online(): \"\"\" Check if we are online Thanks @KeithMukai for the suggestion! 
https://twitter.com/KeithMukai/status/1470571942000443392", "seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home(): \"\"\" Main home", "params['xpriv'] = xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if __name__ ==", "= \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] =", "which generates random seed phrases \"\"\" if is_online(): return render_template('panic.html') params = {}", "return words @app.route(\"/\") def home(): \"\"\" Main home page which generates random seed", "io import BytesIO import socket from binascii import unhexlify, hexlify # Trusting SeedSigner's", "embit import script from embit.networks import NETWORKS # Trusting qrcode library as offline", "wordlists.bip39.WORDLIST def is_online(): \"\"\" Check if we are online Thanks @KeithMukai for the", "BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random", "format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\"", "embit.networks import NETWORKS # Trusting qrcode library as offline qr code creation import", "as simple web interface from flask import Flask, render_template, request app = Flask(__name__)", "# Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import bip32 from embit", "\"\"\" Return a base64 PNG image of our SeedQR. 
\"\"\" # create a", "# https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def", "web interface from flask import Flask, render_template, request app = Flask(__name__) wordlist =", "json import base64 import random from io import BytesIO import socket from binascii", "from embit import bip39 from embit import wordlists from embit import script from", "@KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53))", "# seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] =", "import BytesIO import socket from binascii import unhexlify, hexlify # Trusting SeedSigner's embit", "embit import bip39 from embit import wordlists from embit import script from embit.networks", "of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 =", "= xpriv params['xpub'] = xpub.to_string(version=version) return render_template('index.html', **params, wordlist=wordlist) if __name__ == \"__main__\":", "def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\")", "\"\"\" if is_online(): return render_template('panic.html') params = {} # generate a random seed", "NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv", "string value of our SeedQR. 
\"\"\" return ''.join([str(wordlist.index(w)).zfill(4) for w in words.split()]) def", "from io import BytesIO import socket from binascii import unhexlify, hexlify # Trusting", "Check if we are online Thanks @KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s", "\"\"\" # create a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) #", "a qrcode of our seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64", "home(): \"\"\" Main home page which generates random seed phrases \"\"\" if is_online():", "params['derivation_path'] = \"m/84'/0'/0'\" version = bip32.detect_version(params['derivation_path'], default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint']", "def is_online(): \"\"\" Check if we are online Thanks @KeithMukai for the suggestion!", "im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words", "params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] = xpriv params['xpub']", "default=\"xpub\", network=NETWORKS['main']) root = bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = root.derive(params['derivation_path']) xpub", "socket from binascii import unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit", "offline qr code creation import qrcode # Trusting Flask as simple web interface", "return im_b64.decode() def get_seed_phrase(entropy): \"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return", "bip32.HDKey.from_seed(params['seed'], NETWORKS['main']['xprv']) params['fingerprint'] = hexlify(root.child(0).fingerprint).decode() xpriv = 
root.derive(params['derivation_path']) xpub = xpriv.to_public() params['xpriv'] =", "online Thanks @KeithMukai for the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try:", "im_file = BytesIO() img.save(im_file, format=\"PNG\") im_b64 = base64.b64encode(im_file.getvalue()) return im_b64.decode() def get_seed_phrase(entropy): \"\"\"", "seed_qr_string img = qrcode.make( seed_qr_string(words)) # generate a base64 encoding of our png", "True except OSError: return False def seed_qr_string(words): \"\"\" Return the string value of", "image of our SeedQR. \"\"\" # create a qrcode of our seed_qr_string img", "params['entropy'] = random.randbytes(32) # seedQR our our entropy params['words'] = get_seed_phrase(params['entropy']) params['seed_qr_string'] =", "the suggestion! https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(('1.1.1.1', 53)) return True", "we are online Thanks @KeithMukai for the suggestion! 
https://twitter.com/KeithMukai/status/1470571942000443392 \"\"\" s = socket.socket(socket.AF_INET,", "library # https://github.com/SeedSigner/embit from embit import bip32 from embit import bip39 from embit", "generates random seed phrases \"\"\" if is_online(): return render_template('panic.html') params = {} #", "return True except OSError: return False def seed_qr_string(words): \"\"\" Return the string value", "import unhexlify, hexlify # Trusting SeedSigner's embit library # https://github.com/SeedSigner/embit from embit import", "seed_qr_string(words)) # generate a base64 encoding of our png image # https://jdhao.github.io/2020/03/17/base64_opencv_pil_image_conversion/ im_file", "params['seed_qr_string'] = seed_qr_string(params['words']) params['seed_qr_base64'] = seed_qr_base64(params['words']) params['seed'] = bip39.mnemonic_to_seed(params['words']) params['derivation_path'] = \"m/84'/0'/0'\" version", "import qrcode # Trusting Flask as simple web interface from flask import Flask,", "our SeedQR. \"\"\" # create a qrcode of our seed_qr_string img = qrcode.make(", "\"\"\" Generate random seedphrase \"\"\" words = bip39.mnemonic_from_bytes(entropy) return words @app.route(\"/\") def home():" ]
[ "migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False,", "models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel(", "1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf import settings from", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "utf-8 -*- # Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__ import", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254,", "serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True,", "null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', },", "-*- coding: utf-8 
-*- # Generated by Django 1.11.8 on 2018-02-07 21:09 from", "serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]", "'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at',", "user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={", "-*- # Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals", "editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery',", "by Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf import", "dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],", "] operations = [ 
migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),", "# -*- coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-02-07 21:09", "('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)),", "on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf import settings from django.db", "Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf import settings", "2018-02-07 21:09 from __future__ import unicode_literals from django.conf import settings from django.db import", "max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users',", "__future__ import unicode_literals from django.conf import settings from django.db import migrations, models import", "null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ],", "'0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True,", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from django.conf", 
"django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False,", "('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ),", "max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending", "models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending", "= [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)),", "'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32,", "('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ 
('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE,", "null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user',", "fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1',", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "= [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ ('user',", "= True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "[ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False,", "models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, 
max_length=254,", "[ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token',", "('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified',", "('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False,", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('auth',", "models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name':", "coding: utf-8 -*- # Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__", "True dependencies = [ ('auth', '0008_alter_user_username_max_length'), 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending',", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False,", "name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user',", "options={ 'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id',", "on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)),", "21:09 from __future__ import unicode_literals from django.conf import settings from django.db import migrations,", "users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)),", "'verbose_name': 'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', 
fields=[ ('id', models.AutoField(auto_created=True,", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False,", "# Generated by Django 1.11.8 on 2018-02-07 21:09 from __future__ import unicode_literals from", "}, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token',", "primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),", "primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2',", "], options={ 'verbose_name': 'Pending 
user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[", "operations = [ migrations.CreateModel( name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at',", "('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)), ], options={ 'verbose_name': 'Pending user', 'verbose_name_plural':", "migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)),", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "name='Pending', fields=[ ('user', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)), ('created_at', models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)),", "'Pending user', 'verbose_name_plural': 'Pending users', }, ), migrations.CreateModel( name='Recovery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.DateTimeField(auto_now_add=True)), ('token', models.CharField(editable=False, max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token',", "max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True)), ('email_verified', models.BooleanField(default=False, editable=False)),", 
"max_length=32)), ('admin_contact_1', models.EmailField(blank=True, max_length=254, null=True)), ('admin_contact_2', models.EmailField(blank=True, max_length=254, null=True)), ('admin_token', models.CharField(editable=False, max_length=32, null=True))," ]
[ "in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] =", "int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type':", "* 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker',", "indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1", "open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py", "test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id", "test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path,", "{ 'StripSpaces': True } } dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor':", "./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if __name__ == '__main__': sys.exit(main(sys.argv))", "'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True", "test_id += 1 test_cost = 0 if test_id == 1 else 100.0 /", "dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False", "else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version']", "get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob =", "'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>')", "'' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output'] 
dst_prob['TimeLimit']", "'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] = { 'Build': 129,", "= { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else:", "= 'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000)", "while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0,", "'' } dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or", "'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id = 1", "= json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] ==", "'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value':", "= 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output']", "import sys import json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num,", "== '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output']", "cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] =", "'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id", "= int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = {", "src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] =", "{ 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker']", "'StripSpaces': True 
} } dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2,", "if test_id == 1 else 100.0 / (test_id - 1) for i in", "dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': ''", "'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] =", "129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = []", "(test_id - 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path,", "test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0 if test_id ==", "int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path,", "'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = {", "{ 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList']", "1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList']", "= 0 if test_id == 1 else 100.0 / (test_id - 1) for", "for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob,", "'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces':", "'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id", "'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' }", "os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, }", "'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob =", "} } 
dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3,", "= test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1:", "src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit']", "import os import sys import json in_fmt = '%d.in' out_fmt = '%d.out' def", "= '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def", "test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage:", "import json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return", "def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin'", "+= 1 test_cost = 0 if test_id == 1 else 100.0 / (test_id", "main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return", "test_id == 1 else 100.0 / (test_id - 1) for i in range(test_id", "= { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt", "out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0 if", "= [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)):", "testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if", "= int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if", "1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value':", 
"len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if", "[] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test", "2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id = 1 while", "1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost':", "else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail']", "100.0 / (test_id - 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] =", "'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input']", "if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] =", "json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path,", "= 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = {", "% test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id += 1", "in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id +=", "= { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' }", "} dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path,", "/ (test_id - 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost", "= '%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt", "% test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0 if test_id", "in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num, 
test_fmt): return os.path.join(path, 'tests',", "test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {}", "= {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] =", "src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit']", "} cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] +=", "1 test_cost = 0 if test_id == 1 else 100.0 / (test_id -", "in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def", "'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit']", "dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0 if test_id == 1", "{} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout'", "0 if test_id == 1 else 100.0 / (test_id - 1) for i", "def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob", "cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test]", "os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns'", "if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == ''", "if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy':", "test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += 
[cur_test] test_id += 1 test_cost", "3, 'Tag': '' } dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id,", "1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id =", "'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] = { 'Build': 129, 'Major':", "json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == ''", "<= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if __name__", "1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker']", "False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe',", "} } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True }", "out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile']", "= out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost = 0", "} else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } }", "os import sys import json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path,", "or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt", "[cur_test] test_id += 1 test_cost = 0 if test_id == 1 else 100.0", "'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return", "print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if __name__ == '__main__':", "else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] =", "} 
dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag':", "1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'),", "dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] *", "out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num)", "dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': {", "= False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName':", "test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test =", "'Tag': '' } dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt))", "= in_fmt % test_id cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id", "src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] == '' else", "return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem']", "cur_test['OutputFile'] = out_fmt % test_id dst_prob['TestList'] += [cur_test] test_id += 1 test_cost =", "i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2))", "src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input']", "#!/usr/bin/env python3 import os import sys import json in_fmt = '%d.in' out_fmt =", "'' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) 
dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024)", "'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else", "dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] =", "sys import json in_fmt = '%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt):", "== 1 else 100.0 / (test_id - 1) for i in range(test_id -", "1 else 100.0 / (test_id - 1) for i in range(test_id - 1):", "1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args)", "'Release': 3, 'Tag': '' } dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path,", "dst_prob['TestList'] = [] test_id = 1 while os.path.exists(get_test(path, test_id, in_fmt)) or os.path.exists(get_test(path, test_id,", "dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <=", "os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob", "'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if src_prob['output'] ==", "'Build': 129, 'Major': 1, 'Minor': 2, 'Release': 3, 'Tag': '' } dst_prob['TestList'] =", "'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type':", "'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker', 'Value': {", "== '' else src_prob['output'] dst_prob['TimeLimit'] = int(src_prob['timeLimit'] * 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] *", "'Value': { 'StripSpaces': True } } dst_prob['Version'] = { 'Build': 129, 'Major': 1,", "+= [cur_test] test_id += 1 test_cost = 0 
if test_id == 1 else", "os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile'] = in_fmt %", "test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile'] =", "dst_prob = {} dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile']", "'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem", "{ 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } } else: dst_prob['Checker'] = { 'Type': 'TFileCompareChecker',", "range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args):", "test_cost = 0 if test_id == 1 else 100.0 / (test_id - 1)", "'%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path):", "'%d.in' out_fmt = '%d.out' def get_test(path, test_num, test_fmt): return os.path.join(path, 'tests', test_fmt %", "def main(args): if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1])", "python3 import os import sys import json in_fmt = '%d.in' out_fmt = '%d.out'", "{ 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] = { 'Build':", "* 1000) dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')):", "dst_prob['MemoryLimit'] = int(src_prob['memoryLimit'] * 1024) dst_prob['StopAfterFirstFail'] = False if os.path.exists(os.path.join(path, 'checker.cpp')): dst_prob['Checker'] =", "True } } dst_prob['Version'] = { 'Build': 129, 'Major': 1, 'Minor': 2, 'Release':", "else 100.0 / (test_id - 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost']", 
"dst_prob['InputFile'] = 'stdin' if src_prob['input'] == '' else src_prob['input'] dst_prob['OutputFile'] = 'stdout' if", "- 1) for i in range(test_id - 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests',", "= { 'Type': 'TFileCompareChecker', 'Value': { 'StripSpaces': True } } dst_prob['Version'] = {", "1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0 if __name__ ==", "% test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'), 'r').read())['problem'] dst_prob = {} dst_prob['InputFile']", "test_fmt): return os.path.join(path, 'tests', test_fmt % test_num) def testerize(path): src_prob = json.loads(open(os.path.join(path, 'problem.json'),", "{ 'Cost': 1.0, } cur_test['InputFile'] = in_fmt % test_id cur_test['OutputFile'] = out_fmt %", "if len(args) <= 1: print('Usage: ./testerize.py <problem directory>') return 1 testerize(args[1]) return 0", "dst_prob['Checker'] = { 'Type': 'TTextChecker', 'Value': { 'CheckerFileName': 'checker.exe', 'ParamsPolicy': 'secpInOutAns' } }", "test_id, in_fmt)) or os.path.exists(get_test(path, test_id, out_fmt)): cur_test = { 'Cost': 1.0, } cur_test['InputFile']", "- 1): dst_prob['TestList'][i]['Cost'] = test_cost open(os.path.join(path, 'tests', 'props.json'), 'w').write(json.dumps(dst_prob, indent=2)) def main(args): if" ]
[ "= features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as np import", "coding=utf-8 # Copyright 2020 The Uncertainty Baselines Authors. # # Licensed under the", "= element['num_tokens'] # compute number of tokens expected num_tokens_expected = np.sum(features.numpy() != 0,", "if all data modes can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size =", "License. # Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import", "labels = element['labels'] expected_batch_size = ( batch_size if split == base.Split.TRAIN else eval_batch_size)", "this file except in compliance with the License. # You may obtain a", "next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] # compute number of tokens expected", "= 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size,", "= next(iter(dataset)) features = element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected)", "element['labels'] expected_batch_size = ( batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape, _", "ANY KIND, either express or implied. 
# See the License for the specific", "clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split):", "\"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as np import tensorflow", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL))", "features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\"", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "tensorflow as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets", "= element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests", "class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size", "OF ANY KIND, either express or implied. 
# See the License for the", "('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5 dataset_builder =", "= 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples", "9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,", "numpy as np import tensorflow as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets", "features = element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self):", "for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as np import tensorflow as", "( batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "import numpy as np import tensorflow as tf import uncertainty_baselines as ub from", "num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected) if __name__", "dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] #", "element = next(iter(dataset)) features = element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples,", "\"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law 
or agreed to", "ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as np import tensorflow as tf", "element = next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] # compute number of", "field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN", "modes can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split =", "dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1)", "clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Authors. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "expected_batch_size = ( batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape, _ =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid", "batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid tokens. vocab_size = dataset_builder.tokenizer.num_words", "== base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "tokens. 
vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is", "batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset))", "required by applicable law or agreed to in writing, software # distributed under", "eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features", "applicable law or agreed to in writing, software # distributed under the License", "= features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD',", "batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape =", "shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels = element['labels']", "eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode)", "ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features']", "vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded", "or agreed to in writing, software # distributed under the License is distributed", 
"base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size =", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "governing permissions and # limitations under the License. # Lint as: python3 \"\"\"Tests", "# The number of valid tokens. vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self):", "= element['labels'] expected_batch_size = ( batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape,", "base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset =", "@parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected):", "features = element['features'] num_tokens = element['num_tokens'] # compute number of tokens expected num_tokens_expected", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "_, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer", "License. 
# You may obtain a copy of the License at # #", "uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def", "Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the", "testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20,", "compliance with the License. # You may obtain a copy of the License", "= np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected) if __name__ ==", "num_tokens = element['num_tokens'] # compute number of tokens expected num_tokens_expected = np.sum(features.numpy() !=", "features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood',", "('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind',", "dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _, features_length = features.shape", "'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): 
\"\"\"Tests if all data", "is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number", "specific language governing permissions and # limitations under the License. # Lint as:", "absl.testing import parameterized import numpy as np import tensorflow as tf import uncertainty_baselines", "not use this file except in compliance with the License. # You may", "batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens", "= ( batch_size if split == base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape", "correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset(", "License, Version 2.0 (the \"License\"); # you may not use this file except", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "and # limitations under the License. 
# Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\"", "# compute number of tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded", "eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels =", "batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size,", "# you may not use this file except in compliance with the License.", "data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can be loaded correctly.\"\"\" batch_size =", "'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests", "# Copyright 2020 The Uncertainty Baselines Authors. # # Licensed under the Apache", "('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5", "dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features", "agreed to in writing, software # distributed under the License is distributed on", "num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _,", "= dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\"", "9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,)", 
"dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _, features_length =", "Copyright 2020 The Uncertainty Baselines Authors. # # Licensed under the Apache License,", "base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset(", "(the \"License\"); # you may not use this file except in compliance with", "ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size =", "'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can be", "# Unless required by applicable law or agreed to in writing, software #", "= base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset", "by applicable law or agreed to in writing, software # distributed under the", "self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size =", "shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features =", "7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size = 9", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "batch_size = 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) 
dataset", "\"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, )", "testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can be loaded correctly.\"\"\" batch_size", "ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train',", "= dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _, features_length = features.shape self.assertEqual(features_length,", "be loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder", "file except in compliance with the License. # You may obtain a copy", "# coding=utf-8 # Copyright 2020 The Uncertainty Baselines Authors. # # Licensed under", "as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import", "the License. # Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized", "License for the specific language governing permissions and # limitations under the License.", "<gh_stars>1-10 # coding=utf-8 # Copyright 2020 The Uncertainty Baselines Authors. # # Licensed", "all data modes can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5", "to in writing, software # distributed under the License is distributed on an", "language governing permissions and # limitations under the License. # Lint as: python3", "implied. 
# See the License for the specific language governing permissions and #", "= next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size = ( batch_size if", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "= ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid tokens. vocab_size", "expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected) if", "self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self,", "import uncertainty_baselines as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class", "import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self,", "element = next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size = ( batch_size", "base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test',", "ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features']", "element['features'] labels = element['labels'] 
expected_batch_size = ( batch_size if split == base.Split.TRAIN else", "The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0", "expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def", "or implied. # See the License for the specific language governing permissions and", "split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "permissions and # limitations under the License. # Lint as: python3 \"\"\"Tests for", "base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset))", "of tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded,", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as", "= ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "else eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,))", "testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size =", "number of valid tokens. vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if", "num_train_examples_expected): \"\"\"Tests if all data modes can be loaded correctly.\"\"\" batch_size = 9", "split == base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape,", "limitations under the License. 
# Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "split): batch_size = 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20)", "dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid tokens.", "= ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features =", "can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "import tensorflow as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import base from", "you may not use this file except in compliance with the License. #", "element['features'] num_tokens = element['num_tokens'] # compute number of tokens expected num_tokens_expected = np.sum(features.numpy()", "loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of", "is loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder", "of valid tokens. vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens", "element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if", ") # The number of valid tokens. 
vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def", "ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid tokens. vocab_size =", "use this file except in compliance with the License. # You may obtain", "= dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] # compute", "num_tokens field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size = (", "= 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset =", "dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size", "9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1)", "from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST))", "dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] # compute number", "from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN),", "2.0 (the 
\"License\"); # you may not use this file except in compliance", "as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy as np", "tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected)", "for the specific language governing permissions and # limitations under the License. #", "np import tensorflow as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import base", "eval_batch_size = 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "= labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All',", "from absl.testing import parameterized import numpy as np import tensorflow as tf import", "@parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size", "# # Unless required by applicable law or agreed to in writing, software", "tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent", "express or implied. 
# See the License for the specific language governing permissions", "dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features", "= next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens'] # compute number of tokens", "either express or implied. # See the License for the specific language governing", "shuffle_buffer_size=20, ) # The number of valid tokens. vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291)", "uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation',", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "clinc_intent._NUM_TRAIN_OOD), ('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes", "ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element =", "def testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size,", "if split == base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape", "np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected) if __name__ == '__main__':", "# limitations under the License. 
# Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from", "loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split = base.Split.TRAIN dataset_builder =", "num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9,", "the License. # You may obtain a copy of the License at #", "element['num_tokens'] # compute number of tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1)", "split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples']", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND',", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset))", "= 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element =", "The number of valid tokens. 
vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests", "= dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size =", "labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD),", "5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20, data_mode=data_mode) num_train_examples =", "with the License. # You may obtain a copy of the License at", "= element['features'] labels = element['labels'] expected_batch_size = ( batch_size if split == base.Split.TRAIN", "data modes can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "next(iter(dataset)) features = element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def", "under the License. 
# Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL),", "as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase, parameterized.TestCase):", "labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all',", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "next(iter(dataset)) features = element['features'] labels = element['labels'] expected_batch_size = ( batch_size if split", "def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can be loaded correctly.\"\"\"", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _, features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH)", "eval_batch_size=5, shuffle_buffer_size=20, ) # The number of valid tokens. 
vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size,", "import parameterized import numpy as np import tensorflow as tf import uncertainty_baselines as", "See the License for the specific language governing permissions and # limitations under", "uncertainty_baselines as ub from uncertainty_baselines.datasets import base from uncertainty_baselines.datasets import clinc_intent class ClincIntentDetectionDatasetTest(tf.test.TestCase,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens =", "batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] labels", "parameterized import numpy as np import tensorflow as tf import uncertainty_baselines as ub", "= ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "valid tokens. 
vocab_size = dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field", "base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5 dataset_builder", "def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size", "features_length = features.shape self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is", "= dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] _, features_length", "def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5,", "_ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape, (expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND),", "as np import tensorflow as tf import uncertainty_baselines as ub from uncertainty_baselines.datasets import", "Version 2.0 (the \"License\"); # you may not use this file except in", "the specific language governing permissions and # limitations under the License. # Lint", "except in compliance with the License. # You may obtain a copy of", "dataset_builder.tokenizer.num_words self.assertEqual(vocab_size, 7291) def testNumTokens(self): \"\"\"Tests if num_tokens field is loaded correctly.\"\"\" batch_size", "number of tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded = num_tokens.numpy()", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20) dataset = dataset_builder.build(split).take(1) element", "('All', 'all', clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can", "shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features'] num_tokens = element['num_tokens']", "2020 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version", "5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1)", "testDatasetSize(self, split): batch_size = 9 eval_batch_size = 5 dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size,", "if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) #", "clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode, num_train_examples_expected): \"\"\"Tests if all data modes can be loaded", "base.Split.TRAIN else eval_batch_size) feature_shape, _ = features.shape labels_shape = labels.shape self.assertEqual(feature_shape, expected_batch_size) self.assertEqual(labels_shape,", "compute number of tokens expected num_tokens_expected = np.sum(features.numpy() != 0, axis=-1) num_tokens_loaded =", "if num_tokens field is loaded correctly.\"\"\" batch_size = 9 eval_batch_size = 5 split", "(expected_batch_size,)) @parameterized.named_parameters(('IND', 'ind', clinc_intent._NUM_TRAIN_IND), ('OOD', 'ood', clinc_intent._NUM_TRAIN_OOD), ('All', 'all', 
clinc_intent._NUM_TRAIN_ALL)) def testDataMode(self, data_mode,", "parameterized.TestCase): @parameterized.named_parameters(('Train', base.Split.TRAIN), ('Validation', base.Split.VAL), ('Test', base.Split.TEST)) def testDatasetSize(self, split): batch_size = 9", "self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset(", "= base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset = dataset_builder.build(split).take(1) element =", "self.assertEqual(features_length, clinc_intent._FEATURE_LENGTH) self.assertEqual(num_train_examples, num_train_examples_expected) def testTokenizer(self): \"\"\"Tests if tokenizer is loaded correctly.\"\"\" dataset_builder", "data_mode=data_mode) num_train_examples = dataset_builder.info['num_train_examples'] dataset = dataset_builder.build(split).take(1) element = next(iter(dataset)) features = element['features']", "Baselines Authors. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "\"\"\"Tests if all data modes can be loaded correctly.\"\"\" batch_size = 9 eval_batch_size", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "= element['features'] num_tokens = element['num_tokens'] # compute number of tokens expected num_tokens_expected =", "= 5 split = base.Split.TRAIN dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=batch_size, eval_batch_size=eval_batch_size, shuffle_buffer_size=20,) dataset =", "!= 0, axis=-1) num_tokens_loaded = num_tokens.numpy() np.testing.assert_array_equal(num_tokens_loaded, num_tokens_expected) if __name__ == '__main__': tf.test.main()", "# Lint as: python3 \"\"\"Tests for ClincIntentDetectionDataset.\"\"\" from absl.testing import parameterized import numpy", "tokenizer is loaded correctly.\"\"\" dataset_builder = ub.datasets.ClincIntentDetectionDataset( batch_size=9, eval_batch_size=5, shuffle_buffer_size=20, ) # The", "features = element['features'] labels = element['labels'] expected_batch_size = ( batch_size if split ==" ]
[ "\"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms = \"any\",", "= \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate", "\"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages", "author = \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms", "Name: setup.py Author: gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\",", "from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description", "same birthday rate\", long_description = long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\",", "= long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\",", "long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email", "Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages =", "packages = find_packages(), include_package_data = True, platforms = \"any\", install_requires = [] #这个项目需要的第三方库", "import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read()", "birthday rate\", long_description = long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github", "(\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description = long_description, license = \"MIT", "#项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), 
include_package_data = True,", "url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(),", "#!/usr/bin/env python #-*- coding:utf-8 -*- \"\"\" File Name: setup.py Author: gadfy \"\"\" from", "with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read() setup( name = \"samebirthdayrate\",", "encoding=\"utf-8\") as f: long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version =", "= f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords =", "\"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), include_package_data =", "f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"),", "\"r\", encoding=\"utf-8\") as f: long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version", "long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords", "version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\",", "= \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description", "coding:utf-8 -*- \"\"\" File Name: setup.py Author: gadfy \"\"\" from setuptools import setup,", "\"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description =", "setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description =", "author_email = \"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms = \"any\", install_requires", "#这个包没有的可以pip一下 with 
open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read() setup( name =", "python #-*- coding:utf-8 -*- \"\"\" File Name: setup.py Author: gadfy \"\"\" from setuptools", "\"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms = \"any\", install_requires = []", "-*- \"\"\" File Name: setup.py Author: gadfy \"\"\" from setuptools import setup, find_packages", "= \"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms = \"any\", install_requires =", "Author: gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\")", "find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read() setup( name", "= (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description = long_description, license =", "open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称", "setup.py Author: gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\",", "keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description = long_description, license", "= \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\",", "long_description = long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author =", "= \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), include_package_data = True, platforms =", "f: long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip", "\"caculate same birthday rate\", long_description = long_description, license = \"MIT Licence\", url =", "#-*- coding:utf-8 -*- \"\"\" File Name: setup.py Author: 
gadfy \"\"\" from setuptools import", "= \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email = \"<EMAIL>\", packages = find_packages(), include_package_data", "setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description", "as f: long_description = f.read() setup( name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\",", "= \"caculate same birthday rate\", long_description = long_description, license = \"MIT Licence\", url", "= find_packages(), include_package_data = True, platforms = \"any\", install_requires = [] #这个项目需要的第三方库 )", "\"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f:", "#这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday", "#版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same birthday rate\", long_description = long_description,", "rate\", long_description = long_description, license = \"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author", "\"\"\" File Name: setup.py Author: gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下", "\"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description = \"caculate same", "File Name: setup.py Author: gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with", "description = \"caculate same birthday rate\", long_description = long_description, license = \"MIT Licence\",", "name = \"samebirthdayrate\", #这里是pip项目发布的名称 version = \"1.0.0\", #版本号,数值大的会优先被pip keywords = (\"pip\",\"samebirthdayrate\"), description =", "gadfy \"\"\" from setuptools import setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as", "license = 
\"MIT Licence\", url = \"https://narwelplists.herokuapp.com/\", #项目相关文件地址,一般是github author = \"gadfy\", author_email =", "setup, find_packages #这个包没有的可以pip一下 with open(\"README.md\", \"r\", encoding=\"utf-8\") as f: long_description = f.read() setup(" ]
[ "from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines class", "as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration()", "unittest from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines", "SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is", "import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql", "Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine = SQLStorageEngine(config) engine.initialise() with self.assertRaises(NotImplementedError): engine.braintree_store()", "Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine", "@unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine = SQLStorageEngine(config) engine.initialise()", "programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)", "programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines 
class SQLBraintreeStoreTests(unittest.TestCase):", "import unittest from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as", "SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self):", "class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine =", "programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config =", "False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine = SQLStorageEngine(config) engine.initialise() with self.assertRaises(NotImplementedError):", "unittest import unittest from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines", "import unittest import unittest from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration import", "<gh_stars>1-10 import unittest import unittest from programy.storage.stores.sql.engine import SQLStorageEngine from programy.storage.stores.sql.config import SQLStorageConfiguration", "from programy.storage.stores.sql.config import SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False,", "import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config", "is False, 
Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine = SQLStorageEngine(config) engine.initialise() with", "SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def test_initialise(self): config = SQLStorageConfiguration() engine = SQLStorageEngine(config)", "import SQLStorageConfiguration import programytest.storage.engines as Engines class SQLBraintreeStoreTests(unittest.TestCase): @unittest.skipIf(Engines.sql is False, Engines.sql_disabled) def" ]
[ "utf-8 -*- \"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created on Wed", "14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute", "ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation)", "def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation) Parameters ---------- self :", "object Returns ------- V: float Magnet volume [m**3] \"\"\" return self.comp_surface() * self.Lmag", "computation) Parameters ---------- self : Magnet A Magnet object Returns ------- V: float", "the Magnet volume (by analytical computation) Parameters ---------- self : Magnet A Magnet", "Compute the Magnet volume method @date Created on Wed Dec 17 14:56:19 2014", "Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def", "A Magnet object Returns ------- V: float Magnet volume [m**3] \"\"\" return self.comp_surface()", "on Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b", "volume method @date Created on Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015", "@copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet", "\"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation) Parameters ---------- self", "(by analytical computation) Parameters ---------- self : Magnet A Magnet object Returns -------", "2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the", "@author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation) Parameters", "Magnet A Magnet object Returns ------- V: float Magnet volume [m**3] \"\"\" return", "17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. 
@author pierre_b \"\"\" def comp_volume(self):", "EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical", "Parameters ---------- self : Magnet A Magnet object Returns ------- V: float Magnet", "\"\"\"Compute the Magnet volume (by analytical computation) Parameters ---------- self : Magnet A", "-*- \"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created on Wed Dec", "the Magnet volume method @date Created on Wed Dec 17 14:56:19 2014 @copyright", "Created on Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author", "comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation) Parameters ---------- self : Magnet", "Magnet object Returns ------- V: float Magnet volume [m**3] \"\"\" return self.comp_surface() *", "Magnet volume method @date Created on Wed Dec 17 14:56:19 2014 @copyright (C)", "(C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume", "@date Created on Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING.", "method @date Created on Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS", "---------- self : Magnet A Magnet object Returns ------- V: float Magnet volume", "coding: utf-8 -*- \"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created on", "analytical computation) Parameters ---------- self : Magnet A Magnet object Returns ------- V:", "self : Magnet A Magnet object Returns ------- V: float Magnet volume [m**3]", "pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by analytical computation) Parameters ----------", "Magnet volume (by analytical computation) Parameters ---------- self : Magnet A Magnet object", "-*- coding: utf-8 -*- \"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created", "volume (by analytical 
computation) Parameters ---------- self : Magnet A Magnet object Returns", "2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\" def comp_volume(self): \"\"\"Compute the Magnet volume (by", ": Magnet A Magnet object Returns ------- V: float Magnet volume [m**3] \"\"\"", "\"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created on Wed Dec 17", "Wed Dec 17 14:56:19 2014 @copyright (C) 2014-2015 EOMYS ENGINEERING. @author pierre_b \"\"\"", "# -*- coding: utf-8 -*- \"\"\"@package Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date", "Methods.Machine.MagnetType10.comp_volume Compute the Magnet volume method @date Created on Wed Dec 17 14:56:19" ]
[ "DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query)", "= MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data)", "GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod", "#need delete and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection,", "from pymongo import MongoClient from bson.objectid import ObjectId from gridfs import GridFS class", "def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option)", "update, option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id @staticmethod", "def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection,", "def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete", "def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def", "import ObjectId from gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def", "@staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return 
DB.DATABASE[collection].find(query) #need", "DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id", "def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id @staticmethod def get_file(file_id): return", "find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def remove(collection, query):", "pymongo import MongoClient from bson.objectid import ObjectId from gridfs import GridFS class DB(object):", "option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id @staticmethod def", "@staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update,", "<filename>server/app/database.py<gh_stars>10-100 import pymongo from pymongo import MongoClient from bson.objectid import ObjectId from gridfs", "query): return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query)", "def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id", "save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id @staticmethod def get_file(file_id): return DB.FS.get(ObjectId(file_id))", "client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query):", "@staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def", "client = MongoClient(DB.URI) DB.DATABASE = 
client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data):", "import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI)", "import pymongo from pymongo import MongoClient from bson.objectid import ObjectId from gridfs import", "class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE =", "= client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection,", "from bson.objectid import ObjectId from gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\"", "def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def remove(collection,", "data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return", "pymongo from pymongo import MongoClient from bson.objectid import ObjectId from gridfs import GridFS", "update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id =", "delete and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query,", "from gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client", "MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod", "DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod 
def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def", "find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and", "return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update methods", "option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return", "and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update,", "DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return", "GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE", "@staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename) return file_id @staticmethod def get_file(file_id):", "= \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS =", "DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file,", "insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query):", "update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False):", "DB(object): URI = 
\"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB']", "query): return DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update", "remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod", "@staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename):", "update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file, filename=filename)", "\"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE)", "ObjectId from gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init():", "URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS", "@staticmethod def init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod", "bson.objectid import ObjectId from gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod", "query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def save_file(file, filename): file_id = DB.FS.put(file,", "@staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def", "query): 
DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query, update, option) @staticmethod def", "init(): client = MongoClient(DB.URI) DB.DATABASE = client['TDSB'] DB.FS = GridFS(DB.DATABASE) @staticmethod def insert(collection,", "DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def", "MongoClient from bson.objectid import ObjectId from gridfs import GridFS class DB(object): URI =", "= GridFS(DB.DATABASE) @staticmethod def insert(collection, data): DB.DATABASE[collection].insert(data) @staticmethod def find_one(collection, query): return DB.DATABASE[collection].find_one(query)", "import MongoClient from bson.objectid import ObjectId from gridfs import GridFS class DB(object): URI", "return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod", "gridfs import GridFS class DB(object): URI = \"mongodb+srv://TDSB301:<EMAIL>/test?retryWrites=true&w=majority\" @staticmethod def init(): client =", "methods @staticmethod def remove(collection, query): DB.DATABASE[collection].remove(query) @staticmethod def update(collection, query, update, option=False): DB.DATABASE[collection].update(query,", "DB.DATABASE[collection].find_one(query) @staticmethod def find(collection, query): return DB.DATABASE[collection].find(query) #need delete and update methods @staticmethod" ]
[ "if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size,", "1024 ** 2 # Max number of files log_max_file_count = 10 if __name__", "the log file if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger", "import pathlib from pylogger import PyLogger # Path to logs log_file_path = \"logs//example.log\"", "message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR message\")", "from pylogger import PyLogger # Path to logs log_file_path = \"logs//example.log\" # Max", "Path to logs log_file_path = \"logs//example.log\" # Max file size log_max_file_size = 1024", "exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO", "10 if __name__ == '__main__': # Create a path to the log file", "a path to the log file if it doesn't exist path = pathlib.Path(log_file_path)", "= 10 if __name__ == '__main__': # Create a path to the log", "of files log_max_file_count = 10 if __name__ == '__main__': # Create a path", "# Max file size log_max_file_size = 1024 ** 2 # Max number of", "logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\")", "doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample", "of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR message\") logger.critical(\"Sample of", "of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of", "PyLogger # Path to logs 
log_file_path = \"logs//example.log\" # Max file size log_max_file_size", "# Max number of files log_max_file_count = 10 if __name__ == '__main__': #", "<filename>example.py import pathlib from pylogger import PyLogger # Path to logs log_file_path =", "path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG", "Max file size log_max_file_size = 1024 ** 2 # Max number of files", "Max number of files log_max_file_count = 10 if __name__ == '__main__': # Create", "INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR message\") logger.critical(\"Sample of CRITICAL", "it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count)", "__name__ == '__main__': # Create a path to the log file if it", "file if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path,", "exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of", "2 # Max number of files log_max_file_count = 10 if __name__ == '__main__':", "logs log_file_path = \"logs//example.log\" # Max file size log_max_file_size = 1024 ** 2", "if __name__ == '__main__': # Create a path to the log file if", "== '__main__': # Create a path to the log file if it doesn't", "files log_max_file_count = 10 if __name__ == '__main__': # Create a path to", "log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\")", "log_max_file_size = 1024 ** 2 # Max number of files log_max_file_count = 10", "import PyLogger # Path to logs 
log_file_path = \"logs//example.log\" # Max file size", "= \"logs//example.log\" # Max file size log_max_file_size = 1024 ** 2 # Max", "to the log file if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True)", "path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of", "= PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample", "# Create a path to the log file if it doesn't exist path", "logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample", "\"logs//example.log\" # Max file size log_max_file_size = 1024 ** 2 # Max number", "logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR message\") logger.critical(\"Sample", "pylogger import PyLogger # Path to logs log_file_path = \"logs//example.log\" # Max file", "** 2 # Max number of files log_max_file_count = 10 if __name__ ==", "log_max_file_count = 10 if __name__ == '__main__': # Create a path to the", "= pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\")", "path to the log file if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True,", "size log_max_file_size = 1024 ** 2 # Max number of files log_max_file_count =", "log_file_path = \"logs//example.log\" # Max file size log_max_file_size = 1024 ** 2 #", "message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR message\") logger.critical(\"Sample of CRITICAL message\")", "pathlib.Path(log_file_path) 
path.parent.mkdir(parents=True, exist_ok=True) logger = PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample", "# Path to logs log_file_path = \"logs//example.log\" # Max file size log_max_file_size =", "pathlib from pylogger import PyLogger # Path to logs log_file_path = \"logs//example.log\" #", "to logs log_file_path = \"logs//example.log\" # Max file size log_max_file_size = 1024 **", "Create a path to the log file if it doesn't exist path =", "log file if it doesn't exist path = pathlib.Path(log_file_path) path.parent.mkdir(parents=True, exist_ok=True) logger =", "DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING message\") logger.error(\"Sample of ERROR", "file size log_max_file_size = 1024 ** 2 # Max number of files log_max_file_count", "PyLogger.get_logger(log_file_path, log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of", "number of files log_max_file_count = 10 if __name__ == '__main__': # Create a", "log_max_file_size, log_max_file_count) logger.debug(\"Sample of DEBUG message\") logger.info(\"Sample of INFO message\") logger.warning(\"Sample of WARNING", "= 1024 ** 2 # Max number of files log_max_file_count = 10 if", "'__main__': # Create a path to the log file if it doesn't exist" ]
[ "= [\"Alma\",\"Armut\",\"Üzüm\",\"Çilek\",\"Karpuz\",\"Muz\"] print(meyveler) #bunu listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da", "#python da dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde", "meyveler = [\"Alma\",\"Armut\",\"Üzüm\",\"Çilek\",\"Karpuz\",\"Muz\"] print(meyveler) #bunu listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python", "tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi", "print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma olarak", "eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme", "print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide kaç tane", "saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız.", "değiştirme komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası dizilerde bulunan madde", "dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide", "print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman", "bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur.", "#lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index", "sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. 
print(len(meyveler))", "biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası dizilerde bulunan madde numaralarıdır.) print(meyveler)", "kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\"", "\"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası dizilerde", "komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası dizilerde bulunan madde numaralarıdır.)", "dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu", "#bunu listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan", "listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır.", "print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu", "print(meyveler) #bunu listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak", "kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma", "#diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası dizilerde bulunan", "\"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı", "da dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\"", "meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini elma olarak değiştiriyorz.(index numarası", "öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz", "içinde \"len\" komutunu kullanırız. 
print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda", "komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]= \"Elma\" #diziyi değiştirme komutudur. biz burda sıfırıncı indexini", "#dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght meyveler[0]=", "[\"Alma\",\"Armut\",\"Üzüm\",\"Çilek\",\"Karpuz\",\"Muz\"] print(meyveler) #bunu listeye dökmek için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri", "print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek", "için; print(meyveler[0]) print(meyveler[1]) print(meyveler[2]) print(meyveler[3]) #python da dizileri saymak sıfırdan başlanır. #dizide kaç", "başlanır. #dizide kaç tane eleman bulunduğunu öğrenmek içinde \"len\" komutunu kullanırız. print(len(meyveler)) #lenght" ]
[ "def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class", "from contextlib import contextmanager import six from v8cffi.platform import platform from v8cffi.vm import", "= string_ptr def test_free(self): \"\"\" It should free the string \"\"\" with patch('v8cffi.context.lib',", "' var my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' '", "pass def tearDown(self): pass def test_with(self): \"\"\" It should support with statement \"\"\"", "on V8\\ and get a useful traceback \"\"\" def get_exception_message(ctx, script): try: return", "context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' '", "oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var a;' script_long", "'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' '", "+ '\\n' '}') # todo: trim source line when too long with context.Context(self.vm)", "= context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should", "import unicode_literals try: from unittest.mock import patch, Mock except ImportError: from mock import", "+ var_a * 100 + '\\n' '}') # todo: trim source line when", "= ( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 =", "'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx", "self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at", "var_a * 100 + 
'thereMayBeMoreErrors();' + var_a * 100 + '\\n' '}') #", "context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3", "the script file content on V8 \"\"\" script = b'var foo = \"foo\";'", "as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path])", "= string_ptr def test_to_bytes(self): \"\"\" It should return the string bytes \"\"\" with", "def test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx,", "thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' '", "thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' '", "v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass", "script on V8\\ and get a useful traceback \"\"\" def get_exception_message(ctx, script): try:", "s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr", "\"\"\" It should fail to re exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__)", "' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected", "the string bytes \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr =", "[context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\"", "var my_var_2;\\n' '}') var_a = 'var a;' script_long = ( 'function oops3() {\\n'", "nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 
'nonExistentFunc();'))", "call \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')]", "path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script,", "' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n'", "r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run the script file content on V8", "'var txt = \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo'))", "class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\" It should", "logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\" It", "= 'var bar = \"bar!\";' script_special = 'var txt = \"áéíóú\";' with context.Context(self.vm)", "the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should", "finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down()", "create \"\"\" s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s", "free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__()", "\"\"\" script = b'var foo = \"foo\";' with js_file(script) as path: with context.Context(self.vm)", "s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should", "defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', 
get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' '", "with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close()", "tempfile from contextlib import contextmanager import six from v8cffi.platform import platform from v8cffi.vm", "is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual(", "self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep", "self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a reference to the VM \"\"\"", "= ( 'function oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a", "tearDown(self): pass def test_with(self): \"\"\" It should support with statement \"\"\" with context._String()", "as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should run the script", "context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail", "= \"foo!\";' script_bar = 'var bar = \"bar!\";' script_special = 'var txt =", "patch, Mock except ImportError: from mock import patch, Mock import unittest import logging", "' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n'", "It should pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);'))", "try: return ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex) script_oops = ( 'function", "ex: return six.text_type(ex) script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var", "import platform from v8cffi.vm import VM from v8cffi import exceptions from v8cffi import", "\"\"\" ctx = 
context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo'", "= 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It should free", "'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx = context.Context(self.vm)", "on V8 \"\"\" script = b'var foo = \"foo\";' with js_file(script) as path:", "' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no", "V8 \"\"\" script = b'var foo = \"foo\";' with js_file(script) as path: with", "script_oops2 = ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a", "with patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free", "script on V8 \"\"\" script_foo = b'var foo = \"foo!\";' script_bar = 'var", "'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = ( 'function", "ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex) script_oops = ( 'function oops() {\\n'", "' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at", "self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError,", "test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\",", "context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError,", "from 
__future__ import unicode_literals try: from unittest.mock import patch, Mock except ImportError: from", "var_a = 'var a;' script_long = ( 'function oops3() {\\n' + var_a *", "platform from v8cffi.vm import VM from v8cffi import exceptions from v8cffi import context", "enter \"\"\" s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self):", "b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It", "os import tempfile from contextlib import contextmanager import six from v8cffi.platform import platform", "autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run the script", "run the script on V8 \"\"\" script_foo = b'var foo = \"foo!\";' script_bar", "self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at", "StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\" It should support", "'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' '", "'}') var_a = 'var a;' script_long = ( 'function oops3() {\\n' + var_a", "utf-8 -*- from __future__ import unicode_literals try: from unittest.mock import patch, Mock except", "ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm)", "ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run the script file content on", "def test_keep_vm(self): \"\"\" It should keep a reference to the VM \"\"\" ctx", "var my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n' ' 
thereMayBeMoreErrors();\\n' ' var", "= 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__ \"\"\"", "as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should", "with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def", "( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var", "test_load_libs(self): \"\"\" It should run the script file content on V8 \"\"\" script", "as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to re", "foo = \"foo!\";' script_bar = 'var bar = \"bar!\";' script_special = 'var txt", "VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support", "test_to_str(self): \"\"\" It should support str call \"\"\" with context._String() as s: string_ptr", "'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx,", "patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free =", "txt = \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\",", "thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()'))", "test_with(self): \"\"\" It should support with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx,", "import os import tempfile from contextlib import contextmanager import six from v8cffi.platform import", "'thereMayBeMoreErrors();' + var_a * 100 + 
'\\n' '}') # todo: trim source line", "with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It", "ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();')", "with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def", "( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = (", "self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should return the string", "\"\"\" s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\"", "' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n' '", "string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__() free =", "with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run", "= context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self):", "get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx,", "* 100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\\n' '}') # todo:", "= b'var foo = \"foo\";' with js_file(script) as path: with context.Context(self.vm) as ctx:", "r.assert_called_once_with(script, identifier=path) def 
test_run_script(self): \"\"\" It should run the script on V8 \"\"\"", "on V8 \"\"\" script_foo = b'var foo = \"foo!\";' script_bar = 'var bar", "builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\"", "statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual(", "yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def", "js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase):", "ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\"", "def test_load_libs(self): \"\"\" It should run the script file content on V8 \"\"\"", "should run the script on V8\\ and get a useful traceback \"\"\" def", "reference to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\"", "context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0)", "script file content on V8 \"\"\" script = b'var foo = \"foo\";' with", "r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx = context.Context(self.vm) with", "self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n'", "content on V8 \"\"\" script = b'var foo = \"foo\";' with js_file(script) as", "should support str 
call \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr", "' var my_var_2;\\n' '}') var_a = 'var a;' script_long = ( 'function oops3()", "self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support str call \"\"\"", "\"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0]", "at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' '", "context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr))", "as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield", "ImportError: from mock import patch, Mock import unittest import logging import os import", "a useful traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as", "with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to", "ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support with statement", "self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with", "script_special = 'var txt = \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special)", "import logging import os import 
tempfile from contextlib import contextmanager import six from", "setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should", "_: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to re create", "b'var foo = \"foo!\";' script_bar = 'var bar = \"bar!\";' script_special = 'var", "'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self):", "re exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass", "def test_to_str(self): \"\"\" It should support str call \"\"\" with context._String() as s:", "unittest.mock import patch, Mock except ImportError: from mock import patch, Mock import unittest", "v8cffi.vm import VM from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class", "s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data):", "ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz')", "should call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r:", "setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\" It should support with statement", "tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a reference to the VM", "self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: 
self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\"", "should support with statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char", "return six.text_type(ex) script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n'", "<anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not", "context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support with statement \"\"\" with", "should run the script on V8 \"\"\" script_foo = b'var foo = \"foo!\";'", "import six from v8cffi.platform import platform from v8cffi.vm import VM from v8cffi import", "token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw", "b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It", "context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It", "should keep a reference to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM)", "= VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a", "VM from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def", "self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def", "def 
test_set_up(self): \"\"\" It should call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx,", "def test_run_script(self): \"\"\" It should run the script on V8 \"\"\" script_foo =", "support with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\"", "and get a useful traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except", "\"\"\" It should run the script file content on V8 \"\"\" script =", "long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' '", "temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm =", "try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up()", "context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager", "= \"foo\";' with js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script',", "context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support str call \"\"\" with", "script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2", "= s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr =", "statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should", "to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It", "my_var_2;\\n' 
'}') var_a = 'var a;' script_long = ( 'function oops3() {\\n' +", "string_ptr def test_to_bytes(self): \"\"\" It should return the string bytes \"\"\" with context._String()", "= context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support with statement \"\"\"", "coding: utf-8 -*- from __future__ import unicode_literals try: from unittest.mock import patch, Mock", "= context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo')", "from unittest.mock import patch, Mock except ImportError: from mock import patch, Mock import", "with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call __enter__", "Mock except ImportError: from mock import patch, Mock import unittest import logging import", "ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should run the script on V8", "with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should pre-load", "is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual(", "'<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has", "context.Context) def test_set_up(self): \"\"\" It should call __enter__ \"\"\" ctx = context.Context(self.vm) with", ".stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function()", "s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It", "display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1',", 
"<anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not", "b'var foo = \"foo\";' with js_file(script) as path: with context.Context(self.vm) as ctx: with", "20);')) def test_run_script_trace_back(self): \"\"\" It should run the script on V8\\ and get", "100 + '\\n' '}') # todo: trim source line when too long with", "support with statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'),", "It should return the string bytes \"\"\" with context._String() as s: string_ptr =", "context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call __enter__ \"\"\"", "as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0],", "self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a reference to", "import patch, Mock except ImportError: from mock import patch, Mock import unittest import", "at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is", "with statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr))", "100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\\n' '}') # todo: trim", "'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' '", "patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def 
test_tear_down(self):", "line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long)", "def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a reference to the", "It should fail to re enter \"\"\" s = context._String() with s as", "get a useful traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError", "\"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t", "It should run the script on V8\\ and get a useful traceback \"\"\"", "def test_to_bytes(self): \"\"\" It should return the string bytes \"\"\" with context._String() as", "\"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should", "string_ptr def test_free(self): \"\"\" It should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True)", "test_assert_on_re_exit(self): \"\"\" It should fail to re exit \"\"\" s = context._String() self.assertRaises(AssertionError,", "**'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\"", "' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n'", "def test_assert_on_re_enter(self): \"\"\" It should fail to re enter \"\"\" s = context._String()", "oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long", 
"def test_with(self): \"\"\" It should support with statement \"\"\" with context.Context(self.vm) as ctx:", "foo = \"foo\";' with js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx,", "var_a * 100 + '\\n' '}') # todo: trim source line when too", "3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It should free the", "\"\"\" It should support with statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String)", "_: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to re exit \"\"\"", "\"\"\" It should fail to re enter \"\"\" s = context._String() with s", "test_assert_on_re_enter(self): \"\"\" It should fail to re enter \"\"\" s = context._String() with", "self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0],", "should support with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self):", "'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not", "bytes \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')]", "self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to re enter \"\"\" s =", "re enter \"\"\" s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__) def", "test_keep_vm(self): \"\"\" It should keep a reference to the VM \"\"\" ctx =", "property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function() {\\n'", "= \"bar!\";' script_special = 'var txt = 
\"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo)", "= context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self):", "ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError:", "with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow", "get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n'", "context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self):", "def setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\" It should support with", "free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to re enter \"\"\"", "'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n'", "'var a;' script_long = ( 'function oops3() {\\n' + var_a * 100 +", "It should keep a reference to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm,", "traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as ex: return", "no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx,", "'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx,", 
"'<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function() {\\n' ' throw", "a reference to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self):", "to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at", "_: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp =", "\"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex)", "script): try: return ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex) script_oops = (", "useful traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as ex:", "ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self):", "self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to re exit \"\"\" s", "mock import patch, Mock import unittest import logging import os import tempfile from", "( 'function oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a *", "'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should", "' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc", "pass def test_with(self): \"\"\" It should support with statement \"\"\" with context._String() as", "'__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run the", "Mock() r.v8cffi_free = free s.__exit__() 
self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to", "six.text_type(ex) script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}')", "six from v8cffi.platform import platform from v8cffi.vm import VM from v8cffi import exceptions", "exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self):", "patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run", "Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message(", "'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n'", "\"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__)", "except ImportError: from mock import patch, Mock import unittest import logging import os", "to re enter \"\"\" s = context._String() with s as _: self.assertRaises(AssertionError, s.__enter__)", "temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def", "temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform)", "It should support with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def", "with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should", "r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to re", "return ctx.run_script(script) except exceptions.V8JSError as ex: return 
six.text_type(ex) script_oops = ( 'function oops()", "s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try:", "long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' '", "as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run the script file", "= [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self):", "self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\" with", "exceptions.V8JSError as ex: return six.text_type(ex) script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n'", "v8cffi.platform import platform from v8cffi.vm import VM from v8cffi import exceptions from v8cffi", "~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3", "context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support", "self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support str call \"\"\" with context._String()", "too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n'", "(<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError:", "a;' script_long = ( 'function oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();'", "not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n'", "It should fail to re exit \"\"\" s = 
context._String() self.assertRaises(AssertionError, s.__exit__) with", "as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp", "not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n'", "keep a reference to the VM \"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def", "^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1',", "context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self): \"\"\"", "re create \"\"\" s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with", "^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1',", "is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' '", "'}') # todo: trim source line when too long with context.Context(self.vm) as ctx:", "' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors", "get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n'", "ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\" with context.Context(self.vm)", "self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It", "+ 'thereMayBeMoreErrors();' + var_a * 100 + '\\n' '}') # todo: trim source", 
"as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run the script", "at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is", "v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def", "+ var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\\n' '}')", "throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function() {\\n' ' throw \"myException\";\\n' '})();'))", "get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' '", "' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n'", "self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as", "context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\"", "V8\\ and get a useful traceback \"\"\" def get_exception_message(ctx, script): try: return ctx.run_script(script)", "self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run the script on V8\\", "as ex: return six.text_type(ex) script_oops = ( 'function oops() {\\n' ' thereMayBeErrors();\\n' '", "'\\n' '}') # todo: trim source line when too long with context.Context(self.vm) as", "my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n'", "VM) def test_with(self): \"\"\" It should support 
with statement \"\"\" with context.Context(self.vm) as", "oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n'", "string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr", "allow to re create \"\"\" s = context._String() with s as _: self.assertIsNotNone(s.string_ptr)", "ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run the script on V8\\ and", "the script on V8 \"\"\" script_foo = b'var foo = \"foo!\";' script_bar =", "the script on V8\\ and get a useful traceback \"\"\" def get_exception_message(ctx, script):", "patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should", "s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def", "\"\"\" It should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s", "ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\"", "not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n'", "def test_assert_on_re_exit(self): \"\"\" It should fail to re exit \"\"\" s = context._String()", "It should call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as", "s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It should", "\"\"\" It should support str call \"\"\" with context._String() as s: string_ptr =", "def get_exception_message(ctx, script): try: return ctx.run_script(script) except 
exceptions.V8JSError as ex: return six.text_type(ex) script_oops", "Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n' '", "pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to re create \"\"\"", "get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is", "__future__ import unicode_literals try: from unittest.mock import patch, Mock except ImportError: from mock", "\"\"\" It should call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True)", "# todo: trim source line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops,", "support str call \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr =", "s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo')", "'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx,", "= 'var txt = \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\",", "s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should", "ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx:", "self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at", "return the string bytes \"\"\" with context._String() as s: string_ptr = 
s.string_ptr s.string_ptr", "libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It", "autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It", "context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\"", "s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to re enter \"\"\" s", "exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _: pass self.assertRaises(AssertionError,", "at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too", "0) def test_to_str(self): \"\"\" It should support str call \"\"\" with context._String() as", "import VM from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase):", "should fail to re enter \"\"\" s = context._String() with s as _:", "thereMayBeMoreErrors is not defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()'))", "*'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support str", "self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It should free the string", "= Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail", "import unittest import logging import os import tempfile from contextlib import contextmanager import", "context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 
'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin", "self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False)", "from v8cffi.platform import platform from v8cffi.vm import VM from v8cffi import exceptions from", "= ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a =", "script_foo = b'var foo = \"foo!\";' script_bar = 'var bar = \"bar!\";' script_special", "ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def", "s as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to re", "= b'var foo = \"foo!\";' script_bar = 'var bar = \"bar!\";' script_special =", "context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def", "[context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\"", "with js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as", "should pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def", "is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual(", "# Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException',", "It should run the script file content on V8 \"\"\" script = b'var", "_: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = 
tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name", "as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script,", "= 'var a;' script_long = ( 'function oops3() {\\n' + var_a * 100", "' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function() {\\n' ' throw \"myException\";\\n'", "unittest import logging import os import tempfile from contextlib import contextmanager import six", "VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It should keep a reference", "def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx:", "= 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should return", "= free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should fail to re enter", "should run the script file content on V8 \"\"\" script = b'var foo", "{\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var a;' script_long =", "nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n'", "with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path)", "\"\"\" It should run the script on V8\\ and get a useful traceback", "as ctx: with patch.object(ctx, 'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self):", "'var bar = \"bar!\";' script_special = 'var txt = 
\"áéíóú\";' with context.Context(self.vm) as", "'<anonymous>:1\\n' ' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1',", "test_run_script(self): \"\"\" It should run the script on V8 \"\"\" script_foo = b'var", "[', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n'", "__exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with()", "contextlib import contextmanager import six from v8cffi.platform import platform from v8cffi.vm import VM", "'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops", "\"foo!\";' script_bar = 'var bar = \"bar!\";' script_special = 'var txt = \"áéíóú\";'", "{\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n'", "from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self):", "def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\" It", "self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) #", "s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def", "\"\"\" script_foo = b'var foo = \"foo!\";' script_bar = 'var bar = \"bar!\";'", "test_assert_on_re_create(self): \"\"\" It should allow to re create \"\"\" s = context._String() with", "ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run the script on", "as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 
self.assertEqual(six.text_type(s),", "ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' '", "' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n'", "should allow to re create \"\"\" s = context._String() with s as _:", "\"foo\";' with js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True)", "It should allow to re create \"\"\" s = context._String() with s as", "file content on V8 \"\"\" script = b'var foo = \"foo\";' with js_file(script)", "thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n'", "ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError,", "-*- from __future__ import unicode_literals try: from unittest.mock import patch, Mock except ImportError:", "todo: trim source line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js')", "def test_free(self): \"\"\" It should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as", "s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL)", "to re create \"\"\" s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr)", "= \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) 
ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar'))", "from mock import patch, Mock import unittest import logging import os import tempfile", "test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__',", "should call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r:", "ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script,", "\"\"\" ctx = context.Context(self.vm) self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support with", "identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is", "as r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__()", "ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(),", "string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr", "'<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' '", "It should support with statement \"\"\" with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual(", "# -*- coding: utf-8 -*- from __future__ import unicode_literals try: from unittest.mock import", "trim source line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') 
ctx.run_script(script_oops2,", "\"\"\" It should pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10,", "function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack", "<anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError: thereMayBeMoreErrors", "as _: self.assertRaises(AssertionError, s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to re exit", "string bytes \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]',", "s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to re create \"\"\" s =", "\"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def", "'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();'))", "ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call __enter__ \"\"\" ctx =", "It should run the script on V8 \"\"\" script_foo = b'var foo =", "except exceptions.V8JSError as ex: return six.text_type(ex) script_oops = ( 'function oops() {\\n' '", "test_with(self): \"\"\" It should support with statement \"\"\" with context._String() as s: self.assertIsInstance(s,", "autospec=True) as r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free = free", "autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should run the", "r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should run the script on", "\"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s = 
context._String() s.__enter__() free = Mock()", "with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] =", "pre-load builtin libraries \"\"\" with context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self):", "s.string_ptr = string_ptr def test_free(self): \"\"\" It should free the string \"\"\" with", "s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo')", "@contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name)", "as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n'", "' ^\\n' 'ReferenceError: thereMayBeMoreErrors is not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at", "source line when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js')", "the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s = context._String() s.__enter__() free", "context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with()", "'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries \"\"\" with context.Context(self.vm) as", "unicode_literals try: from unittest.mock import patch, Mock except ImportError: from mock import patch,", "fail to re exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as", "self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call 
__enter__ \"\"\" ctx = context.Context(self.vm)", "script_long = ( 'function oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();' +", "context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It", "run the script on V8\\ and get a useful traceback \"\"\" def get_exception_message(ctx,", "'run_script', autospec=True) as r: ctx.load_libs([path]) r.assert_called_once_with(script, identifier=path) def test_run_script(self): \"\"\" It should run", "= context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\"", "' nonExistentFunc();\\n' ' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx,", "thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()'))", "\"\"\" It should support with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context)", "It should support str call \"\"\" with context._String() as s: string_ptr = s.string_ptr", "self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally:", "' ^\\n' 'ReferenceError: thereMayBeErrors is not defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at", "' ^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property", "from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass", "tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm", "def test_assert_on_re_create(self): \"\"\" 
It should allow to re create \"\"\" s = context._String()", "oops() {\\n' ' thereMayBeErrors();\\n' ' var my_var_2;\\n' '}') script_oops2 = ( 'function oops2()", "with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt'))", "test_free(self): \"\"\" It should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r:", "not defined\\n' ' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n'", "oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100 +", "\"\"\" It should return the string bytes \"\"\" with context._String() as s: string_ptr", "s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It", "identifier=path) def test_run_script(self): \"\"\" It should run the script on V8 \"\"\" script_foo", "import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_with(self):", "script = b'var foo = \"foo\";' with js_file(script) as path: with context.Context(self.vm) as", "from v8cffi.vm import VM from v8cffi import exceptions from v8cffi import context logging.disable(logging.CRITICAL)", "test_to_bytes(self): \"\"\" It should return the string bytes \"\"\" with context._String() as s:", "self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__ \"\"\" ctx =", "* 100 + '\\n' '}') # todo: trim source line when too long", "3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should return the", "'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError: 
thereMayBeMoreErrors is not defined\\n' ' at oops2", "' ^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual(", "It should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s =", "too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n'", "self.assertIsInstance(ctx._vm, VM) def test_with(self): \"\"\" It should support with statement \"\"\" with context.Context(self.vm)", "get_exception_message(ctx, script): try: return ctx.run_script(script) except exceptions.V8JSError as ex: return six.text_type(ex) script_oops =", "r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call __exit__", "to re exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s as _:", "test_set_up(self): \"\"\" It should call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__enter__',", "self.assertEqual( context.ffi.typeof('size_t *'), context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should", "str call \"\"\" with context._String() as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]',", "with statement \"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It", "as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It should pre-load builtin libraries", "Mock import unittest import logging import os import tempfile from contextlib import contextmanager", "__enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, 
'__enter__', autospec=True) as r: r.return_value =", "os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def", "at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token", "'function oops3() {\\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100", "should fail to re exit \"\"\" s = context._String() self.assertRaises(AssertionError, s.__exit__) with s", "import patch, Mock import unittest import logging import os import tempfile from contextlib", "b'foo') s.string_ptr = string_ptr def test_free(self): \"\"\" It should free the string \"\"\"", "with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n'", "ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo') def test_builtin_libs(self): \"\"\" It", "= [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(), b'foo') s.string_ptr = string_ptr def test_free(self):", "= s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr =", "call __exit__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__exit__', autospec=True) as r: ctx.tear_down()", "context.ffi.typeof(s.len_ptr)) self.assertEqual(s.string_ptr[0], context.ffi.NULL) self.assertEqual(s.len_ptr[0], 0) def test_to_str(self): \"\"\" It should support str call", "should free the string \"\"\" with patch('v8cffi.context.lib', autospec=True) as r: s = context._String()", "r: ctx.tear_down() r.assert_called_once_with() def test_load_libs(self): \"\"\" It should run 
the script file content", "\"bar!\";' script_special = 'var txt = \"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar)", "import exceptions from v8cffi import context logging.disable(logging.CRITICAL) class StringTest(unittest.TestCase): def setUp(self): pass def", "at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' ' nonExistentFunc();\\n' '", "ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\", ctx.run_script('txt')) self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script,", "'foo') s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should return the string bytes", "def test_run_script_trace_back(self): \"\"\" It should run the script on V8\\ and get a", "It should call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as", "contextmanager import six from v8cffi.platform import platform from v8cffi.vm import VM from v8cffi", "defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError:", "as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call __enter__ \"\"\" ctx", "\"\"\" s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as", "at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to display.\\n' 'ReferenceError:", "(my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n' 'ReferenceError:", 
"ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors", "self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr) @contextmanager def js_file(data): temp = tempfile.NamedTemporaryFile(delete=False) temp.write(data)", "= context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _: self.assertIsNotNone(s.string_ptr)", "import tempfile from contextlib import contextmanager import six from v8cffi.platform import platform from", "bar = \"bar!\";' script_special = 'var txt = \"áéíóú\";' with context.Context(self.vm) as ctx:", "ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 'ReferenceError: thereMayBeErrors is not", "with context._String() as s: self.assertIsInstance(s, context._String) self.assertEqual( context.ffi.typeof('char **'), context.ffi.typeof(s.string_ptr)) self.assertEqual( context.ffi.typeof('size_t *'),", "defined\\n' ' at oops3 (<anonymous>:2:601)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops3()')) self.assertEqual( '<anonymous>:1\\n' '", "^\\n' 'SyntaxError: Unexpected token [', get_exception_message(ctx, 'function[]();')) # Has no .stack property self.assertEqual(", "s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr", "oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' ' thereMayBeMoreErrors();\\n' ' ^\\n'", "defined\\n' ' at oops (my_file_áéíóú.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops()')) self.assertEqual( 'my_other_file.js:2\\n' '", "def 
test_with(self): \"\"\" It should support with statement \"\"\" with context._String() as s:", "self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to re create \"\"\" s", "test_run_script_trace_back(self): \"\"\" It should run the script on V8\\ and get a useful", "class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self):", "ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self): self.vm.tear_down() def test_keep_vm(self): \"\"\"", "s.string_ptr = string_ptr def test_to_bytes(self): \"\"\" It should return the string bytes \"\"\"", "call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True) as r: r.return_value", "s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(six.text_type(s), 'foo') s.string_ptr = string_ptr def", "\"\"\" with context.Context(self.vm) as ctx: self.assertIsInstance(ctx, context.Context) def test_set_up(self): \"\"\" It should call", "patch, Mock import unittest import logging import os import tempfile from contextlib import", "'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var a;'", "= tempfile.NamedTemporaryFile(delete=False) temp.write(data) temp.close() try: yield temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self):", "free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def test_assert_on_re_enter(self): \"\"\" It should", "r: s = context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called)", "\"\"\" It should allow to re create \"\"\" s = context._String() with s", "^\\n' 'ReferenceError: nonExistentFunc is not defined\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) 
self.assertEqual( '<anonymous>:1\\n'", "s = context._String() with s as _: self.assertIsNotNone(s.string_ptr) self.assertIsNone(s.string_ptr) with s as _:", "js_file(script) as path: with context.Context(self.vm) as ctx: with patch.object(ctx, 'run_script', autospec=True) as r:", "thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var a;' script_long = ( 'function", "self.assertEqual( '<anonymous>:2\\n' ' throw \"myException\";\\n' ' ^\\n' 'myException', get_exception_message( ctx, '(function() {\\n' '", "-*- coding: utf-8 -*- from __future__ import unicode_literals try: from unittest.mock import patch,", "import contextmanager import six from v8cffi.platform import platform from v8cffi.vm import VM from", "def tearDown(self): pass def test_with(self): \"\"\" It should support with statement \"\"\" with", "logging import os import tempfile from contextlib import contextmanager import six from v8cffi.platform", "s as _: pass self.assertRaises(AssertionError, s.__exit__) def test_assert_on_re_create(self): \"\"\" It should allow to", "when too long with context.Context(self.vm) as ctx: ctx.run_script(script_oops, identifier='my_file_áéíóú.js') ctx.run_script(script_oops2, identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual(", "\"\"\" It should call __enter__ \"\"\" ctx = context.Context(self.vm) with patch.object(ctx, '__enter__', autospec=True)", "s.__enter__) def test_assert_on_re_exit(self): \"\"\" It should fail to re exit \"\"\" s =", "as s: string_ptr = s.string_ptr s.string_ptr = [context.ffi.new('char[]', b'foo')] s.len_ptr[0] = 3 self.assertEqual(s.to_bytes(),", "fail to re enter \"\"\" s = context._String() with s as _: self.assertRaises(AssertionError,", "\"\"\" It should run the script on V8 \"\"\" script_foo = b'var foo", "' at oops2 (my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line", "'__enter__', autospec=True) as r: r.return_value = 
'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\"", "\"áéíóú\";' with context.Context(self.vm) as ctx: ctx.run_script(script_foo) ctx.run_script(script_bar) ctx.run_script(script_special) self.assertEqual(\"foo!\", ctx.run_script(b'foo')) self.assertEqual(\"bar!\", ctx.run_script('bar')) self.assertEqual(\"áéíóú\",", "<anonymous>:1:1', get_exception_message(ctx, 'nonExistentFunc();')) self.assertEqual( '<anonymous>:1\\n' ' function[]();\\n' ' ^\\n' 'SyntaxError: Unexpected token [',", "self.assertRaises(exceptions.V8JSError, ctx.run_script, 'baz') self.assertRaises(exceptions.V8JSError, ctx.run_script, 'function[]();') with context.Context(self.vm) as ctx: self.assertRaises(exceptions.V8JSError, ctx.run_script, 'foo')", "'}') script_oops2 = ( 'function oops2() {\\n' ' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}')", "script_bar = 'var bar = \"bar!\";' script_special = 'var txt = \"áéíóú\";' with", "s = context._String() s.__enter__() free = Mock() r.v8cffi_free = free s.__exit__() self.assertTrue(free.called) def", "(my_other_file.js:2:3)\\n' ' at <anonymous>:1:1', get_exception_message(ctx, 'oops2()')) self.assertEqual( '<anonymous>:2\\n' ' ~Line too long to", "r: r.return_value = 'foo' self.assertEqual(ctx.set_up(), 'foo') r.assert_called_once_with() def test_tear_down(self): \"\"\" It should call", "context.Context(self.vm) as ctx: self.assertEqual(\"20\", ctx.run_script('Math.max(10, 20);')) def test_run_script_trace_back(self): \"\"\" It should run the", "temp.name finally: os.remove(temp.name) class ContextTest(unittest.TestCase): def setUp(self): self.vm = VM(platform) self.vm.set_up() def tearDown(self):", "' thereMayBeMoreErrors();\\n' ' var my_var_2;\\n' '}') var_a = 'var a;' script_long = (", "run the script file content on V8 \"\"\" script = b'var foo =", "identifier='my_other_file.js') ctx.run_script(script_long) self.assertEqual( 'my_file_áéíóú.js:2\\n' ' thereMayBeErrors();\\n' ' ^\\n' 
'ReferenceError: thereMayBeErrors is not defined\\n'", "should return the string bytes \"\"\" with context._String() as s: string_ptr = s.string_ptr", "try: from unittest.mock import patch, Mock except ImportError: from mock import patch, Mock", "{\\n' + var_a * 100 + 'thereMayBeMoreErrors();' + var_a * 100 + '\\n'", "\"\"\" It should keep a reference to the VM \"\"\" ctx = context.Context(self.vm)", "V8 \"\"\" script_foo = b'var foo = \"foo!\";' script_bar = 'var bar =" ]
[ "conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature) + \"\\n\") file.flush() utime.sleep(10)", "# Use of this source code is governed by a BSD-style license that", "code is governed by a BSD-style license that can be # found in", "#+-+-+-+-+-+-+-+-+-+-+-+ # Copyright (c) 2021, <EMAIL> # All rights reserved. # Use of", "# # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP)", "source code is governed by a BSD-style license that can be # found", "of this source code is governed by a BSD-style license that can be", "rights reserved. # Use of this source code is governed by a BSD-style", "sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open(\"temps.txt\", \"w\") while", "(65535) file = open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() * conversion_factor temperature", "Copyright (c) 2021, <EMAIL> # All rights reserved. 
# Use of this source", "utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open(\"temps.txt\", \"w\")", "conversion_factor = 3.3 / (65535) file = open(\"temps.txt\", \"w\") while True: reading =", "= open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27", "= sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature) +", "file = open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() * conversion_factor temperature =", "machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file =", "\"w\") while True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading", "p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3", "# Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor", "#|R|i|c|e|L|e|e|.|c|o|m| #+-+-+-+-+-+-+-+-+-+-+-+ # Copyright (c) 2021, <EMAIL> # All rights reserved. # Use", "while True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading -", "import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open(\"temps.txt\",", "license that can be # found in the LICENSE file. # # Origin:", "be # found in the LICENSE file. 
# # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico", "Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor =", "at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 /", "open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27 -", "BSD-style license that can be # found in the LICENSE file. # #", "found in the LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine", "3.3 / (65535) file = open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() *", "a BSD-style license that can be # found in the LICENSE file. #", "All rights reserved. # Use of this source code is governed by a", "LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp", "/ (65535) file = open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16() * conversion_factor", "this source code is governed by a BSD-style license that can be #", "by a BSD-style license that can be # found in the LICENSE file.", "= 3.3 / (65535) file = open(\"temps.txt\", \"w\") while True: reading = sensor_temp.read_u16()", "<EMAIL> # All rights reserved. # Use of this source code is governed", "2021, <EMAIL> # All rights reserved. # Use of this source code is", "Use of this source code is governed by a BSD-style license that can", "the LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime", "file. 
# # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp =", "is governed by a BSD-style license that can be # found in the", "* conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature) + \"\\n\") file.flush()", "#!/usr/bin/python3 #+-+-+-+-+-+-+-+-+-+-+-+ #|R|i|c|e|L|e|e|.|c|o|m| #+-+-+-+-+-+-+-+-+-+-+-+ # Copyright (c) 2021, <EMAIL> # All rights reserved.", "# All rights reserved. # Use of this source code is governed by", "can be # found in the LICENSE file. # # Origin: p111 at", "# found in the LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import", "= machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open(\"temps.txt\", \"w\") while True:", "in the LICENSE file. # # Origin: p111 at https://hackspace.raspberrypi.org/books/micropython-pico import machine import", "# Copyright (c) 2021, <EMAIL> # All rights reserved. # Use of this", "machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file = open(\"temps.txt\", \"w\") while True: reading", "https://hackspace.raspberrypi.org/books/micropython-pico import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535)", "True: reading = sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading - 0.706)/0.001721", "governed by a BSD-style license that can be # found in the LICENSE", "reading = sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature)", "#+-+-+-+-+-+-+-+-+-+-+-+ #|R|i|c|e|L|e|e|.|c|o|m| #+-+-+-+-+-+-+-+-+-+-+-+ # Copyright (c) 2021, <EMAIL> # All rights reserved. 
#", "import machine import utime sensor_temp = machine.ADC(machine.ADC.CORE_TEMP) conversion_factor = 3.3 / (65535) file", "sensor_temp.read_u16() * conversion_factor temperature = 27 - (reading - 0.706)/0.001721 file.write(str(temperature) + \"\\n\")", "reserved. # Use of this source code is governed by a BSD-style license", "(c) 2021, <EMAIL> # All rights reserved. # Use of this source code", "that can be # found in the LICENSE file. # # Origin: p111" ]
[ "null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50, null=True)), ], ),", "[ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)),", "3.2 on 2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration): initial =", "2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies", "('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)),", "('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50, null=True)), ], ), ]", "on 2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration): initial = True", "= [ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False,", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True,", "dependencies = [ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True,", "models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50, 
null=True)),", "= [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50,", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ]", "= True dependencies = [ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id',", "] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20,", "('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug',", "[ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "# Generated by Django 3.2 on 2021-04-18 06:36 from django.db import migrations, models", "from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [", "models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "Django 3.2 on 2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration): initial", "fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, 
max_length=10,", "Generated by Django 3.2 on 2021-04-18 06:36 from django.db import migrations, models class", "null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True,", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True,", "models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions',", "models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True,", "max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50, null=True)), ],", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Meal',", "import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000,", "null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', 
models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)),", "models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')),", "06:36 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies =", "('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url', models.CharField(blank=True, max_length=50,", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Meal', fields=[", "verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)),", "by Django 3.2 on 2021-04-18 06:36 from django.db import migrations, models class Migration(migrations.Migration):", "('name', models.CharField(blank=True, max_length=50, null=True)), ('category', models.CharField(blank=True, max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region',", "migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True, max_length=50, null=True)), ('category',", "max_length=10, null=True)), ('instructions', models.CharField(blank=True, max_length=4000, null=True)), ('region', models.CharField(blank=True, max_length=20, null=True)), ('slug', models.SlugField(default='test')), ('image_url',", "operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(blank=True,", "True dependencies = [ ] operations = [ migrations.CreateModel( name='Meal', fields=[ ('id', models.BigAutoField(auto_created=True," ]
[ "Eric \"\"\" import glob import pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name", "files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0] + '.json' df", "in files: new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine = 'python',", "Dec 10 11:32:52 2018 @author: Eric \"\"\" import glob import pandas as pd", "import pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name =", "10 11:32:52 2018 @author: Eric \"\"\" import glob import pandas as pd files", "\"\"\" Created on Mon Dec 10 11:32:52 2018 @author: Eric \"\"\" import glob", "2018 @author: Eric \"\"\" import glob import pandas as pd files = glob.glob(\"./split_files/*.csv\")", "-*- coding: utf-8 -*- \"\"\" Created on Mon Dec 10 11:32:52 2018 @author:", "utf-8 -*- \"\"\" Created on Mon Dec 10 11:32:52 2018 @author: Eric \"\"\"", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Dec 10 11:32:52 2018", "glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name,", "import glob import pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name in files:", "as pd files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0] +", "for file_name in files: new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine", "pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0]", "new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine = 'python', encoding =", "on Mon Dec 10 11:32:52 2018 @author: Eric \"\"\" import glob import pandas", "\"\"\" import glob import pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name in", "-*- \"\"\" Created on Mon Dec 10 11:32:52 2018 @author: Eric \"\"\" import", "Created on Mon Dec 10 11:32:52 
2018 @author: Eric \"\"\" import glob import", "glob import pandas as pd files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name", "Mon Dec 10 11:32:52 2018 @author: Eric \"\"\" import glob import pandas as", "file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine = 'python', encoding = 'utf-8') df.to_json(new_name)", "= glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0] + '.json' df =", "file_name in files: new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine =", "= file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine = 'python', encoding = 'utf-8')", "11:32:52 2018 @author: Eric \"\"\" import glob import pandas as pd files =", "pd files = glob.glob(\"./split_files/*.csv\") for file_name in files: new_name = file_name.split(\".csv\")[0] + '.json'", "coding: utf-8 -*- \"\"\" Created on Mon Dec 10 11:32:52 2018 @author: Eric", "@author: Eric \"\"\" import glob import pandas as pd files = glob.glob(\"./split_files/*.csv\") for", "files: new_name = file_name.split(\".csv\")[0] + '.json' df = pd.read_csv(file_name, engine = 'python', encoding" ]
[ "have debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib", "global_foo in symbol table should hydrate debug info\", matching=True, substrs=[\"global_foo = 321\"], )", "self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do", "default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name, \"Match", "unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil", "self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch the", "Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run in synchronous", "\" + self.shared_lib_name, \"Match global_foo in symbol table should hydrate debug info\", matching=True,", "* from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir =", "self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should", "self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self,", "+ ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True", "do not stop at entry point. 
process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() )", "= stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup()", "ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True )", "stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame", "# Find the line number to break inside main(). self.source = \"shared.c\" self.line", "lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch the process, and", "self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self,", ") self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared library should not have", "should have a hit count of 1. 
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread()", "for remote targets so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target,", "line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run", ") self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1,", "def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") #", "\"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not", "at entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) #", "= self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of", "lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the process, and do", "4, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not stop", "stop reason of the thread should be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\",", "reason of the thread should be breakpoint. 
self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop", "types defined in shared libraries work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators", "= ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self,", "Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target", ") # Now launch the process, and do not stop at entry point.", "--shlib \" + self.shared_lib_name, \"shared library should not have debug info by default\",", "SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find", "parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build()", "be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) #", "process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine())", "\"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the process, and do not stop", "+ self.shared_lib_name, \"shared library should not have debug info by default\", matching=False, substrs=[\"global_foo\"],", "self.shared_lib_name, \"Match global_foo in symbol table should hydrate debug info\", matching=True, substrs=[\"global_foo =", "the debugger. 
self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for", "have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames", "count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in", "bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0]", "work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import", "self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target by the debugger. self.target", "\"target variable --shlib \" + self.shared_lib_name, \"shared library should not have debug info", "# Create a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) #", "num_expected_locations=1 ) # Now launch the process, and do not stop at entry", "should not have debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable", "number to break inside main(). self.source = \"shared.c\" self.line = line_number(self.source, \"// Set", "= self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\"", "# Call super's setUp(). 
TestBase.setUp(self) # Find the line number to break inside", "sym_exact=True, num_expected_locations=1 ) # Now launch the process, and do not stop at", "self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the", ") ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def", "breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread =", "self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def", "process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason", "parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True,", "Now launch the process, and do not stop at entry point. process =", "self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target by the debugger. self.target =", "The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect(", "variable --shlib \" + self.shared_lib_name, \"shared library should not have debug info by", "break inside main(). self.source = \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0", "correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import *", ") # The breakpoint should have a hit count of 1. 
lldbutil.check_breakpoint(self, bpno=1,", "by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name,", "= line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): #", "lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil", "lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self)", "\"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The breakpoint should", "of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2)", "substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in", "self.line = line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self):", "--shlib a.out\", \"Breakpoint in a.out should have hydrated the debug info\", substrs=[\"global_shared =", "Create a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register", "mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target by the debugger.", "remote targets so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names", "def setUp(self): # Call super's setUp(). 
TestBase.setUp(self) # Find the line number to", "\"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def", "\"stop reason = breakpoint\"], ) # The breakpoint should have a hit count", "@skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) #", "launch the process, and do not stop at entry point. process = self.target.LaunchSimple(", "@skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now", "def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch", "should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable", "self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the process, and", "debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable --shlib \" + self.shared_lib_name,", "self.shared_lib_name, \"shared library should not have debug info by default\", matching=False, substrs=[\"global_foo\"], )", "not have debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo", "= breakpoint\"], ) # The breakpoint should have a hit count of 1.", "STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The breakpoint should have a", "import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call", "automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = 
self.platformContext self.shared_lib_name =", "process, and do not stop at entry point. process = self.target.LaunchSimple( None, self.environment,", "leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self):", "libraries for remote targets so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget(", "get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext", "targets so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names )", "not stop at entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process,", "of the thread should be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason", "self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch the process,", "count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames),", "\"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1,", "# The stop reason of the thread should be breakpoint. 
self.expect( \"thread list\",", "ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\",", "The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread", ") self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1", "lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def", "test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the", "shared libraries work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import * from", "setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break", "debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote", "mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the", "uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix", "hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread)", "target by the debugger. 
self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared", "self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the process, and do not", "= self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote targets so", "variable --shlib a.out\", \"Breakpoint in a.out should have hydrated the debug info\", substrs=[\"global_shared", "# Now launch the process, and do not stop at entry point. process", "import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self):", "leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self):", "as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp().", "num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not stop at", "self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True", "VALID_TARGET) # Register our shared libraries for remote targets so they get #", "self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup()", "= [\"foo\"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand", "test_global_variable_hydration(self): self.build() self.common_setup() 
lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch", "* import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): #", "self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote targets so they get", "a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\",", "self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote targets", "entry point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The", "a.out\", \"Breakpoint in a.out should have hydrated the debug info\", substrs=[\"global_shared = 897\"],", "leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows", "from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class", "synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target by the", "in a.out should have hydrated the debug info\", substrs=[\"global_shared = 897\"], ) self.expect(", "= stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup()", "\" + self.shared_lib_name, \"shared library should not have debug info by default\", matching=False,", "bpno=1, 
expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out should have hydrated", "# automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name", "self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared library should not have debug", "a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames =", "Register our shared libraries for remote targets so they get # automatically uploaded", "lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process, and", "self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension", "import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir", "Find the line number to break inside main(). self.source = \"shared.c\" self.line =", "= process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4,", "breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run in synchronous mode", "true\") # Create a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET)", "self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the", "of 1. 
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out", "self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out should have hydrated the debug", "they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx =", "in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a target by", "\"shared library should not have debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect(", "thread should be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"],", "self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries for remote targets so they", "lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out should have", "should have hydrated the debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable", "= 897\"], ) self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared library should", "\"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\",", "have a hit count of 1. 
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib", "= stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() )", "parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line,", "+ \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4,", "self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint. self.expect(", "self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) # Now launch the process,", "hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint", "hydrated the debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable --shlib \"", "symbols.load-on-demand true\") # Create a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target,", "= TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line", "0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False)", "stop at entry point. 
process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID)", "parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True )", "--shlib \" + self.shared_lib_name, \"Match global_foo in symbol table should hydrate debug info\",", "ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self):", "thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename())", "info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared", "lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's", "leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename()", "TestBase.setUp(self) # Find the line number to break inside main(). 
self.source = \"shared.c\"", "= \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"]", "self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do", "variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in symbol table should hydrate", "self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not", "list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The breakpoint should have", "lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase):", "self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The breakpoint", "\"target variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in symbol table should", "the thread should be breakpoint. 
self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason =", "self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process,", "897\"], ) self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared library should not", "@skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) #", "global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in symbol table should hydrate debug", "matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo", "from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__)", "None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread", "\"Match global_foo in symbol table should hydrate debug info\", matching=True, substrs=[\"global_foo = 321\"],", "= lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame =", "self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in symbol table", "info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \" +", "1. 
lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame", "self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line(", "test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now launch", "TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number", "stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol(", "1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out should", "expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\",", "self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine())", "the process, and do not stop at entry point. process = self.target.LaunchSimple( None,", "a.out should have hydrated the debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target", "setUp(). TestBase.setUp(self) # Find the line number to break inside main(). 
self.source =", "reason = breakpoint\"], ) # The breakpoint should have a hit count of", "loc_exact=True ) # Now launch the process, and do not stop at entry", "self.line, num_expected_locations=1, loc_exact=True ) # Now launch the process, and do not stop", "common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create", "defined in shared libraries work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import", "self.shlib_names = [\"foo\"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set", "self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows", "The stop reason of the thread should be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT,", "super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.source", "shared libraries for remote targets so they get # automatically uploaded self.environment =", "import lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import", "so they get # automatically uploaded self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx", "class SharedLibTestCase(TestBase): mydir = TestBase.compute_mydir(__file__) def setUp(self): # Call super's setUp(). TestBase.setUp(self) #", "libraries work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest", "should be breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], )", "Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main().", "library should not have debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target", "inside main(). 
self.source = \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0 here.\")", "breakpoint. self.expect( \"thread list\", STOPPED_DUE_TO_BREAKPOINT, substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The", "self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual(", "= self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" + ctx.shlib_extension @skipIfWindows def test_source_line_breakpoint(self): self.build()", "# Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\") # Create a", "self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be", "self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 )", "debug info by default\", matching=False, substrs=[\"global_foo\"], ) self.expect( \"target variable global_foo --shlib \"", "breakpoint\"], ) # The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self,", "self.environment = self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix +", "line number to break inside main(). self.source = \"shared.c\" self.line = line_number(self.source, \"//", "and do not stop at entry point. 
process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory()", "\"// Set breakpoint 0 here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run in", "stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7,", "parent_frame = stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build()", "self.registerSharedLibrariesWithTarget( self.target, self.shlib_names ) ctx = self.platformContext self.shared_lib_name = ctx.shlib_prefix + \"foo.\" +", "PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint. self.expect( \"thread", "parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_symbolic_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_symbol( self, \"foo\", sym_exact=True, num_expected_locations=1 ) #", "by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our shared libraries", "lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) thread = process.GetSelectedThread() stack_frames = lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame =", "\"\"\"Test that types defined in shared libraries work correctly.\"\"\" import lldb import unittest2", "main(). 
self.source = \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names", "# Register our shared libraries for remote targets so they get # automatically", "def test_source_line_breakpoint(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, \"foo.c\", 4, num_expected_locations=1, loc_exact=True ) # Now", "\"Breakpoint in a.out should have hydrated the debug info\", substrs=[\"global_shared = 897\"], )", "point. process = self.target.LaunchSimple( None, self.environment, self.get_process_working_directory() ) self.assertTrue(process, PROCESS_IS_VALID) # The stop", "+ self.shared_lib_name, \"Match global_foo in symbol table should hydrate debug info\", matching=True, substrs=[\"global_foo", "in shared libraries work correctly.\"\"\" import lldb import unittest2 from lldbsuite.test.decorators import *", "lldbutil.get_stack_frames(thread) self.assertGreater(len(stack_frames), 2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1]", "breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1) self.expect( \"target", ") self.assertTrue(process, PROCESS_IS_VALID) # The stop reason of the thread should be breakpoint.", "self.source = \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint 0 here.\") self.shlib_names =", "set symbols.load-on-demand true\") # Create a target by the debugger. 
self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\"))", "\"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source,", "import unittest2 from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as", "that types defined in shared libraries work correctly.\"\"\" import lldb import unittest2 from", "the line number to break inside main(). self.source = \"shared.c\" self.line = line_number(self.source,", "here.\") self.shlib_names = [\"foo\"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings", "# The breakpoint should have a hit count of 1. lldbutil.check_breakpoint(self, bpno=1, expected_hit_count=1)", "expected_hit_count=1) self.expect( \"target variable --shlib a.out\", \"Breakpoint in a.out should have hydrated the", "substrs=[\"stopped\", \"stop reason = breakpoint\"], ) # The breakpoint should have a hit", "\"target variable --shlib a.out\", \"Breakpoint in a.out should have hydrated the debug info\",", "2) leaf_frame = stack_frames[0] self.assertEqual(\"foo.c\", leaf_frame.GetLineEntry().GetFileSpec().GetFilename()) self.assertEqual(4, leaf_frame.GetLineEntry().GetLine()) parent_frame = stack_frames[1] self.assertEqual( \"shared.c\",", "our shared libraries for remote targets so they get # automatically uploaded self.environment", "stack_frames[1] self.assertEqual( \"shared.c\", parent_frame.GetLineEntry().GetFileSpec().GetFilename() ) self.assertEqual(7, parent_frame.GetLineEntry().GetLine()) @skipIfWindows def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line(", "have hydrated the debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable 
--shlib", ") self.expect( \"target variable global_foo --shlib \" + self.shared_lib_name, \"Match global_foo in symbol", "the debug info\", substrs=[\"global_shared = 897\"], ) self.expect( \"target variable --shlib \" +", "substrs=[\"global_shared = 897\"], ) self.expect( \"target variable --shlib \" + self.shared_lib_name, \"shared library", "to break inside main(). self.source = \"shared.c\" self.line = line_number(self.source, \"// Set breakpoint", "def test_global_variable_hydration(self): self.build() self.common_setup() lldbutil.run_break_set_by_file_and_line( self, self.source, self.line, num_expected_locations=1, loc_exact=True ) # Now", "[\"foo\"] def common_setup(self): # Run in synchronous mode self.dbg.SetAsync(False) self.runCmd(\"settings set symbols.load-on-demand true\")", "a target by the debugger. self.target = self.dbg.CreateTarget(self.getBuildArtifact(\"a.out\")) self.assertTrue(self.target, VALID_TARGET) # Register our" ]
"""Regenerate the Kaitai Struct parsers and run the table test suite.

Run from the repository root (``SenSchema``) or from ``SenSchema/test``.
Exits non-zero when parser generation or any test fails.
"""

import os
import pathlib
import platform
import subprocess
import sys
import unittest

# Relative path to the Kaitai Struct Python runtime bundled as a submodule.
_RUNTIME = "3rdparty/kaitai_struct_python_runtime/kaitaistruct.py"


def _ensure_test_cwd() -> pathlib.Path:
    """Change into ``SenSchema/test`` and return it; abort anywhere else.

    Guards against running the cleanup/codegen steps in an arbitrary
    directory, which would delete unrelated ``*.py`` files.
    """
    cwd = pathlib.Path.cwd()
    if cwd.name == "SenSchema":
        os.chdir("test")
        cwd = pathlib.Path.cwd()
    # Explicit check instead of ``assert`` so the guard survives ``python -O``.
    if cwd.name != "test" or cwd.parent.name != "SenSchema":
        raise SystemExit(f"Refusing to run outside SenSchema/test (cwd is {cwd})")
    return cwd


def _ensure_runtime_link(current_os: str) -> None:
    """Link the bundled kaitaistruct runtime into the test directory.

    Uses a hard link on Windows (presumably to avoid the symlink-privilege
    requirement there — matches the original behavior) and a symlink elsewhere.
    """
    if pathlib.Path("kaitaistruct.py").exists():
        return
    print("Creating link to kaitai struct compiler")
    if current_os == "Windows":
        os.link(_RUNTIME, "kaitaistruct.py")
    else:
        os.symlink(_RUNTIME, "kaitaistruct.py")


def _regenerate_parsers(cwd: pathlib.Path, current_os: str) -> None:
    """Delete stale generated parsers and re-run kaitai-struct-compiler.

    Raises:
        RuntimeError: if the compiler exits with a non-zero status.
    """
    print("Cleaning up...")
    for file in cwd.glob("cs3tbl/*.py"):
        if file.name == "__init__.py":
            continue  # keep the package marker
        print(f"Removing {file}")
        file.unlink()

    print("Generating parser code")
    os.chdir("cs3tbl")
    try:
        # Windows ships the compiler as a .bat shim.
        executable_file = (
            "kaitai-struct-compiler.bat"
            if current_os == "Windows"
            else "kaitai-struct-compiler"
        )
        proc = subprocess.run(
            [
                executable_file,
                "-t",
                "python",
                "--python-package",
                ".",
                "-I",
                "../../schemas/",
                "../../schemas/cs3.ksy",
            ]
        )
        if proc.returncode != 0:
            raise RuntimeError(f"kaitai-struct-compiler returned {proc.returncode}")
    finally:
        # Always restore the working directory, even on compiler failure.
        os.chdir("..")
    print("Setup done.")


def _run_tests() -> unittest.TestResult:
    """Discover ``test_*.py`` under each ``tbl/`` subdirectory and run them."""
    print("Loading tests.")
    suite = unittest.TestSuite(
        unittest.TestLoader().discover(start_dir=str(path), pattern="test_*.py")
        for path in pathlib.Path("tbl").iterdir()
        # Stray files in tbl/ would make discover() error out; skip them.
        if path.is_dir()
    )
    print("Running tests.")
    return unittest.TextTestRunner().run(suite)


def main() -> int:
    """Entry point; returns the process exit status (0 on success)."""
    cwd = _ensure_test_cwd()
    current_os = platform.system()
    _ensure_runtime_link(current_os)
    _regenerate_parsers(cwd, current_os)
    result = _run_tests()
    # Propagate test failure to the caller/CI instead of always exiting 0.
    return 0 if result.wasSuccessful() else 1


if __name__ == "__main__":
    sys.exit(main())
[ "of the message type. Returns: str: The name of the message type. \"\"\"", "\"\"\" Return the color code of the message type. Returns: str: The color", "\"\"\" Return the name of the message type. Returns: str: The name of", "Return the color code of the message type. Returns: str: The color code", "type. \"\"\" pass def get_color_code(self): \"\"\" Return the color code of the message", "import ABC class MessageType(ABC): \"\"\" Interface for message types. \"\"\" def get_name(self): \"\"\"", "The name of the message type. \"\"\" pass def get_color_code(self): \"\"\" Return the", "for message types. \"\"\" def get_name(self): \"\"\" Return the name of the message", "Return the name of the message type. Returns: str: The name of the", "color code of the message type. Returns: str: The color code of the", "types. \"\"\" def get_name(self): \"\"\" Return the name of the message type. Returns:", "Interface for message types. \"\"\" def get_name(self): \"\"\" Return the name of the", "message types. \"\"\" def get_name(self): \"\"\" Return the name of the message type.", "name of the message type. \"\"\" pass def get_color_code(self): \"\"\" Return the color", "message type. \"\"\" pass def get_color_code(self): \"\"\" Return the color code of the", "message type. Returns: str: The color code of the message type. \"\"\" pass", "def get_color_code(self): \"\"\" Return the color code of the message type. Returns: str:", "get_color_code(self): \"\"\" Return the color code of the message type. Returns: str: The", "type. Returns: str: The name of the message type. \"\"\" pass def get_color_code(self):", "the message type. Returns: str: The color code of the message type. \"\"\"", "the name of the message type. Returns: str: The name of the message", "Returns: str: The name of the message type. \"\"\" pass def get_color_code(self): \"\"\"", "def get_name(self): \"\"\" Return the name of the message type. Returns: str: The", "message type. 
Returns: str: The name of the message type. \"\"\" pass def", "\"\"\" pass def get_color_code(self): \"\"\" Return the color code of the message type.", "from abc import ABC class MessageType(ABC): \"\"\" Interface for message types. \"\"\" def", "of the message type. \"\"\" pass def get_color_code(self): \"\"\" Return the color code", "\"\"\" Interface for message types. \"\"\" def get_name(self): \"\"\" Return the name of", "str: The name of the message type. \"\"\" pass def get_color_code(self): \"\"\" Return", "ABC class MessageType(ABC): \"\"\" Interface for message types. \"\"\" def get_name(self): \"\"\" Return", "abc import ABC class MessageType(ABC): \"\"\" Interface for message types. \"\"\" def get_name(self):", "\"\"\" def get_name(self): \"\"\" Return the name of the message type. Returns: str:", "pass def get_color_code(self): \"\"\" Return the color code of the message type. Returns:", "MessageType(ABC): \"\"\" Interface for message types. \"\"\" def get_name(self): \"\"\" Return the name", "the message type. Returns: str: The name of the message type. \"\"\" pass", "the message type. \"\"\" pass def get_color_code(self): \"\"\" Return the color code of", "code of the message type. Returns: str: The color code of the message", "the color code of the message type. Returns: str: The color code of", "of the message type. Returns: str: The color code of the message type.", "name of the message type. Returns: str: The name of the message type.", "get_name(self): \"\"\" Return the name of the message type. Returns: str: The name", "class MessageType(ABC): \"\"\" Interface for message types. \"\"\" def get_name(self): \"\"\" Return the" ]
[ "] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\",", "Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField(", "dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\",", "2020-02-29 14:20 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration):", "migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\",", "django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"),", "[ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey(", "model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"residents\", to=\"Dormroom.Dormroom\", ),", "on 2020-02-29 14:20 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies", "by Django 3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import 
migrations, models", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"),", "operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True,", "# Generated by Django 3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import", "name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"residents\", to=\"Dormroom.Dormroom\", ), )", "class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [", "\"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet", "\"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor", "= [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL,", "14:20 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "import migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ]", "Django 3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import migrations, models class", "models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\", 
\"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations =", "import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ (\"Dormroom\",", "(\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True,", "migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"residents\", to=\"Dormroom.Dormroom\",", "Generated by Django 3.0.3 on 2020-02-29 14:20 import django.db.models.deletion from django.db import migrations,", "= [ (\"Dormroom\", \"0004_auto_20200229_1420\"), (\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\",", "[ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"residents\",", "(\"SIFUser\", \"0005_merge_20200228_1005\"), ] operations = [ migrations.AlterField( model_name=\"user\", name=\"dormroom\", field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen", "field=models.ForeignKey( blank=True, help_text=\"Kollektivet personen bor i\", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name=\"residents\", to=\"Dormroom.Dormroom\", ), ) ]" ]
[ "Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase):", "api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] =", "Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True)", "id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] =", "version: Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] =", "Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime", "Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON))", "= None name: Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]] =", "lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa:", "= None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id:", "datetime import datetime from typing import Optional, List import pydantic from sqlalchemy import", "= Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class", "sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] =", "Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name:", "datetime from typing 
import Optional, List import pydantic from sqlalchemy import Column, JSON", "updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] =", "import Optional, List import pydantic from sqlalchemy import Column, JSON from sqlmodel import", "id: Optional[str] = None name: Optional[str] = None version: Optional[str] = None attributes:", "class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"]", "Column, JSON from sqlmodel import Field, Relationship from api.db.models.base import BaseModel, BaseTable class", "= Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance]", "SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None version: Optional[str] = None", "SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict =", "noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\")", "BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\")", "from datetime import datetime from typing import Optional, List import pydantic from sqlalchemy", "class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] =", "governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance:", "class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None governance_cas:", "Optional[List[str]] = [] class 
Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None", "= None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] =", "Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase,", "governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable,", "cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] =", "List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821", "uuid from datetime import datetime from typing import Optional, List import pydantic from", "# noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag:", "governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag:", "[] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str]", "Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance]", "class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict", "import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None", "None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON)) governance_cas:", "Optional[str] = None class 
SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={},", "None name: Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]] = []", "= None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance:", "Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel):", "SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance]", "List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821", "Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase):", "import Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] =", "id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] =", "sqlalchemy import Column, JSON from sqlmodel import Field, Relationship from api.db.models.base import BaseModel,", "None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID", "noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None governance_cas:", "= None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id:", "from sqlalchemy import Column, JSON from sqlmodel import Field, Relationship from api.db.models.base import", "class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None version: Optional[str] =", "tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, 
sa_column=Column(JSON)) governance_cas: dict = Field(default={},", "Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821", "= None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] =", "tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class", "name: Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]] = [] class", "datetime tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None", "governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at:", "Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"]", "applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None", "# noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None", "= None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at:", "Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str]", "dict = Field(default={}, sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True):", "cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict =", "SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None governance: 
Optional[Governance]", "class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None governance:", "typing import Optional, List import pydantic from sqlalchemy import Column, JSON from sqlmodel", "uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None", "= None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] =", "None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None", "BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None version: Optional[str]", "None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID", "import datetime from typing import Optional, List import pydantic from sqlalchemy import Column,", "JSON from sqlmodel import Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel):", "import pydantic from sqlalchemy import Column, JSON from sqlmodel import Field, Relationship from", "= None version: Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def:", "from typing import Optional, List import pydantic from sqlalchemy import Column, JSON from", "# noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] =", "students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa:", "pydantic from sqlalchemy import Column, JSON from sqlmodel import Field, Relationship from api.db.models.base", "BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str] = None version:", "from sqlmodel import 
Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id:", "= None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict = Field(default={}, sa_column=Column(JSON))", "None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str]", "= Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa:", "Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str]", "uuid.UUID created_at: datetime updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] = None", "F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance]", "None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str] = Field(nullable=True) governance: dict", "Optional[str] = None name: Optional[str] = None version: Optional[str] = None attributes: Optional[List[str]]", "None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime", "schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class", "sqlmodel import Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str]", "Optional, List import pydantic from sqlalchemy import Column, JSON from sqlmodel import Field,", "= None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag:", "None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None", "= Field(default={}, 
sa_column=Column(JSON)) governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs:", "Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] =", "dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") #", "None class SandboxUpdate(SandboxBase): id: uuid.UUID tag: Optional[str] = None governance: Optional[Governance] = None", "= Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants:", "from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None name: Optional[str]", "Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxUpdate(SandboxBase):", "import Column, JSON from sqlmodel import Field, Relationship from api.db.models.base import BaseModel, BaseTable", "datetime updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance]", "import uuid from datetime import datetime from typing import Optional, List import pydantic", "F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] =", "class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] =", "governance_cas: dict = Field(default={}, sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\")", "List import pydantic from sqlalchemy import Column, JSON from sqlmodel import Field, Relationship", "F821 students: List[\"Student\"] = 
Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") #", "sa_column=Column(JSON)) class Sandbox(SandboxBase, BaseTable, table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students:", "attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] =", "governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id: uuid.UUID created_at: datetime updated_at: datetime tag:", "List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance:", "None version: Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef]", "Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"]", "SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None", "None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel): tag: Optional[str]", "= None governance: Optional[Governance] = None governance_cas: Optional[Governance] = None class SandboxRead(SandboxBase): id:", "noqa: F821 applicants: List[\"Applicant\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str]", "table=True): lobs: List[\"Lob\"] = Relationship(back_populates=\"sandbox\") # noqa: F821 students: List[\"Student\"] = Relationship(back_populates=\"sandbox\") #", "Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None class SandboxBase(BaseModel):", "= Relationship(back_populates=\"sandbox\") # noqa: F821 applicants: List[\"Applicant\"] = 
Relationship(back_populates=\"sandbox\") # noqa: F821 class", "created_at: datetime updated_at: datetime tag: Optional[str] = None governance: Optional[Governance] = None governance_cas:", "Relationship(back_populates=\"sandbox\") # noqa: F821 class SandboxCreate(SandboxBase): tag: Optional[str] = None governance: Optional[Governance] =", "Optional[str] = None attributes: Optional[List[str]] = [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None", "Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag: Optional[str] = None", "Field, Relationship from api.db.models.base import BaseModel, BaseTable class SchemaDef(pydantic.BaseModel): id: Optional[str] = None", "= [] class Governance(pydantic.BaseModel): schema_def: Optional[SchemaDef] = None cred_def_id: Optional[str] = None cred_def_tag:" ]
[ "are utterly weak\") exit() elif self.lvl == 1: return 3 else: bonus =", "no magic ability,\\ and are utterly weak\") exit() elif self.lvl == 1: return", "and are utterly weak\") exit() elif self.lvl == 1: return 3 else: bonus", "_damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for dart in", "except: raise TypeError(\"spell_slot_level should be an integer\") if spell_mode == \"roll_die\" or spell_mode", "clearly have no magic ability,\\ and are utterly weak\") exit() elif self.lvl ==", "total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {}", "return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart =", "} def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for", "sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def", "+ bonus) def _attack_damage(self): for x in range(1): return random.randint(1, 4) def _damage_roll_die(self):", "total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif", "try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\") if spell_mode", "return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self):", "= {} total_damage_per_dart = {} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart", "elif self.lvl == 1: return 3 else: bonus = self.lvl - 1 return", "} def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif 
self.mode == \"roll_dice\":", "+ 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart,", "if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode else: raise", "self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1)", "def __init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be", "def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for dart", "self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart * dart_num return {", "+ 1) total_damage = damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage,", "random class MagicMissile: def __init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise", "= (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return", "cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif self.mode == \"roll_dice\": return self._damage_roll_dice()", "{} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage)", "spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode", "0: print(\"You clearly have no magic ability,\\ and are utterly weak\") exit() elif", "1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\":", "\"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, 
\"total_damage_all_darts\": total_damage } def cast(self): if self.mode", "spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl", "* dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage }", "\"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should be", "bonus = self.lvl - 1 return (3 + bonus) def _attack_damage(self): for x", "base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart =", "x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage =", "1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values())", "self.lvl == 1: return 3 else: bonus = self.lvl - 1 return (3", "spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\")", "ability,\\ and are utterly weak\") exit() elif self.lvl == 1: return 3 else:", "\"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart =", "bonus) def _attack_damage(self): for x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num", "exit() elif self.lvl == 1: return 3 else: bonus = self.lvl - 1", "if self.lvl == 0: print(\"You clearly have no magic ability,\\ and are utterly", "total_damage = damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart,", "3 else: bonus = self.lvl - 1 return (3 + bonus) def _attack_damage(self):", "= spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if", 
"\"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die()", "range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart", "dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num()", "dart_num = self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for dart in range(dart_num):", "== 1: return 3 else: bonus = self.lvl - 1 return (3 +", "1 return (3 + bonus) def _attack_damage(self): for x in range(1): return random.randint(1,", "4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage +", "(damage + 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\":", "be an integer\") if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode =", "== \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\")", "_damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage", "should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You clearly", "self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart *", "damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage", "+ 1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num,", "class MagicMissile: def __init__(self, spell_slot_lvl, spell_mode): try: 
self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level", "1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\":", "or 'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You clearly have no magic", "TypeError(\"spell_slot_level should be an integer\") if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\":", "spell_mode == \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or", "self.lvl == 0: print(\"You clearly have no magic ability,\\ and are utterly weak\")", "damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage", "integer\") if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode else:", "range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ =", "\"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart = {}", "an integer\") if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode", "def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1)", "def _dart_num(self): if self.lvl == 0: print(\"You clearly have no magic ability,\\ and", "\"total_damage_all_darts\": total_damage } def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif self.mode", "self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\") if spell_mode ==", "print(\"You clearly have no magic ability,\\ and are utterly weak\") exit() elif self.lvl", "(base_damage + 1) 
total_damage = damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\":", "= {} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ =", "Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You", "else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl ==", "else: bonus = self.lvl - 1 return (3 + bonus) def _attack_damage(self): for", "total_damage } def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif self.mode ==", "return (3 + bonus) def _attack_damage(self): for x in range(1): return random.randint(1, 4)", "__init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an", "damage_per_dart = (base_damage + 1) total_damage = damage_per_dart * dart_num return { \"darts_fired\":", "for x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage", "\"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def", "1) total_damage = damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\":", "return 3 else: bonus = self.lvl - 1 return (3 + bonus) def", "{} total_damage_per_dart = {} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart +", "(damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return {", "base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if self.mode == \"roll_die\": return", "damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart 
= {} total_damage_per_dart", "random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage", "= damage_per_dart * dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\":", "have no magic ability,\\ and are utterly weak\") exit() elif self.lvl == 1:", "dart_num = self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage =", "or spell_mode == \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should be 'roll_die',", "_dart_num(self): if self.lvl == 0: print(\"You clearly have no magic ability,\\ and are", "\"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if self.mode == \"roll_die\":", "= self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} for dart in range(dart_num): damage", "{ \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num", "total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\":", "- 1 return (3 + bonus) def _attack_damage(self): for x in range(1): return", "= self.lvl - 1 return (3 + bonus) def _attack_damage(self): for x in", "def cast(self): if self.mode == \"roll_die\": return self._damage_roll_die() elif self.mode == \"roll_dice\": return", "total_damage_per_dart = {} for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\", "== 0: print(\"You clearly have no magic ability,\\ and are utterly weak\") exit()", "utterly weak\") exit() elif self.lvl == 1: return 3 else: bonus = self.lvl", "self._dart_num() base_damage_per_dart = {} total_damage_per_dart = {} 
for dart in range(dart_num): damage =", "in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\", "= self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart * dart_num return", "MagicMissile: def __init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should", "(3 + bonus) def _attack_damage(self): for x in range(1): return random.randint(1, 4) def", "\"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num = self._dart_num() base_damage_per_dart", "spell_mode): try: self.lvl = int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\") if", "= int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\") if spell_mode == \"roll_die\"", "dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart +", "= (damage + 1) total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart,", "int(spell_slot_lvl) except: raise TypeError(\"spell_slot_level should be an integer\") if spell_mode == \"roll_die\" or", "raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl == 0:", "{ \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if", "in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num() base_damage = self._attack_damage()", "dart_num return { \"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, 
\"total_damage\": total_damage } def", "_attack_damage(self): for x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num = self._dart_num()", "be 'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You clearly have", "def _attack_damage(self): for x in range(1): return random.randint(1, 4) def _damage_roll_die(self): dart_num =", "= (base_damage + 1) total_damage = damage_per_dart * dart_num return { \"darts_fired\": dart_num,", "= sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage }", "dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self): if self.mode ==", "= self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage +", "for dart in range(dart_num): damage = self._attack_damage() base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart", "'roll_die', or 'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You clearly have no", "'roll_dice'\") def _dart_num(self): if self.lvl == 0: print(\"You clearly have no magic ability,\\", "base_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage", "should be an integer\") if spell_mode == \"roll_die\" or spell_mode == \"roll_dice\": self.mode", "base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart * dart_num", "== \"roll_die\" or spell_mode == \"roll_dice\": self.mode = spell_mode else: raise Exception(\"spell_mode should", "self.mode = spell_mode else: raise Exception(\"spell_mode should be 'roll_die', or 'roll_dice'\") def 
_dart_num(self):", "raise TypeError(\"spell_slot_level should be an integer\") if spell_mode == \"roll_die\" or spell_mode ==", "1: return 3 else: bonus = self.lvl - 1 return (3 + bonus)", "= self._dart_num() base_damage = self._attack_damage() damage_per_dart = (base_damage + 1) total_damage = damage_per_dart", "base_damage_per_dart = {} total_damage_per_dart = {} for dart in range(dart_num): damage = self._attack_damage()", "+ 1)]\\ = (damage) total_damage_per_dart[\"dart_{}\".format(dart + 1)]\\ = (damage + 1) total_damage =", "\"darts_fired\": dart_num, \"base_damage\": base_damage, \"damage_per_dart\": damage_per_dart, \"total_damage\": total_damage } def _damage_roll_dice(self): dart_num =", "return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage } def cast(self):", "total_damage = sum(total_damage_per_dart.values()) return { \"darts_fired\": dart_num, \"base_damage_by_dart\": base_damage_per_dart, \"total_damage_by_dart\": total_damage_per_dart, \"total_damage_all_darts\": total_damage", "self.lvl - 1 return (3 + bonus) def _attack_damage(self): for x in range(1):", "magic ability,\\ and are utterly weak\") exit() elif self.lvl == 1: return 3", "import random class MagicMissile: def __init__(self, spell_slot_lvl, spell_mode): try: self.lvl = int(spell_slot_lvl) except:", "weak\") exit() elif self.lvl == 1: return 3 else: bonus = self.lvl -" ]
[ "phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta", "Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] =", "in nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for", "and z one after another # number of steps Gibbs we want to", "extreme_case: # define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y,", "np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda,", "Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys", "NameError, ZeroDivisionError, OSError): continue break #### Step 3: Sample lambda_aray ### lambda_square_array_state =", "for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ##", "z_state[zero_y_indices] = 0 #z_state an array of 0, 1 theta_state = theta_0 lambda_square_array_state", "#### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1", "as the prior. 
## f_0 defines the present state of the Markov chain", "y, X)) #### Now we want to implment a Gibbs sample where we", "the quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5)", "(1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to stored samples", ") # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if", "Theta, Z, Lambda, Nu, Tau, Eta = [], [], [], [], [], []", "var-cov matrix of Multivariate normal used as the prior. ## f_0 defines the", "= 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of", "#### Simulated data if extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z,", "lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z possible_z = z_state", "are the mean and var-cov matrix of Multivariate normal used as the prior.", "of Multivariate normal used as the prior. 
## f_0 defines the present state", "defines the present state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0,", "delayed year = 2000 #For now, we're focusing on a single year extreme_case", "0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0],", "possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError,", "cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma are the", "( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state =", "from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import Parallel, delayed year =", "state of the variables to sample .. #### theta_state = copy.deepcopy(Theta[-1]) z_state =", "(1, 2, 3, 4), based on the quantiles y_non_zero = Y[Y>0] edge1 =", "0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6)", "define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else:", "likelihood for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta", "array of 0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1,", "beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,)))))", "import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import Parallel, delayed", "(edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4]", "cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define 
conditional likelihood for theta loglikelihood_theta = lambda", "en[np.invert(bool_y_zero)] ## Lets first initialize theta and z for a Markov chain ##", "= np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step 1: Sample theta using", "/ eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to", "(1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) /", "bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y)", "Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step 1: Sample theta", "location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y", "# number of steps Gibbs we want to use n_step_Gibbs = 1 ###", "np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z))", "a single year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X", "3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state **", "= np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9))", "Gibbs we want to use n_step_Gibbs = 1 ### Lists to store the", "samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) 
Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z,", "Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0,", "# Step 2: Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for", "0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0", "= invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state + 0.5 * (", "#For now, we're focusing on a single year extreme_case = True location =", "= invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step 6: Sample eta ###", "samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y,", "a Gibbs sample where we update theta and z one after another #", "plt from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme", "tau_square_state)) #### Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) /", "_ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _, _ =", "axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1,", "include the prior in z inside 
the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z)", "z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break ####", "Mean and Sigma are the mean and var-cov matrix of Multivariate normal used", "get distribution of rainfalls and calculate quantiles #### Then use this to initialise", "eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' )", "bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape)", "/ lambda_square_array_state)) #### Step 6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 +", "#### Now we want to implment a Gibbs sample where we update theta", "# Extract zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices", "= [], [], [], [], [], [] # Extract zero/non-zero indices of y", "1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,))))", "data if extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X))", "#### Step 1: Sample theta using Elliptic Slice Sampler #### if extreme_case: #", "prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state", "\"\"\" import copy from scipy.stats import invgamma as invgamma import numpy as np", "lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to", "nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state", "initialise z (1, 2, 3, 4), based on the quantiles y_non_zero = Y[Y>0]", "to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) 
Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def", "and var-cov matrix of Multivariate normal used as the prior. ## f_0 defines", "#print(ind_Gibbs) ##### Copy the present state of the variables to sample .. ####", "0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 +", "mean for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0,", "# define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X)", "nu_array_state) + (theta_state ** 2) / (2 * tau_square_state)) #### Step 4: Sample", "3, 4), based on the quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25)", "### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2) / (2", "np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from prior to define a true_theta", "define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) #", "np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array", "= invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) #", "y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want", "nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z =", "loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0,", "define conditional likelihood for theta 
loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) #", "we want to implment a Gibbs sample where we update theta and z", "##### Copy the present state of the variables to sample .. #### theta_state", "gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated", "_ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to implment a", "loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for", "np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z", "Z, Lambda, Nu, Tau, Eta = [], [], [], [], [], [] #", "##### Defining the priors from Sherman's paper .... 
without prior on sigmas, so", "the present state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0,", "TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step 3: Sample lambda_aray ### lambda_square_array_state", "Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case", "as plt from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import", "0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else:", "range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] -", "= en[np.invert(bool_y_zero)] ## Lets first initialize theta and z for a Markov chain", "scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda,", "prior to define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0,", "0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0", "a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0],", "scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to", "import copy from scipy.stats import invgamma as invgamma import numpy as np from", "break #### Step 3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state)", "after another 
# number of steps Gibbs we want to use n_step_Gibbs =", "just taking mean for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0,", "<gh_stars>0 \"\"\" Improved sampling code, with horseshoe prior \"\"\" import copy from scipy.stats", "in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices]", "z for a Markov chain ## #### For non-zero y, get distribution of", "0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0,", "Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th", "# Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case:", "nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step 6: Sample eta", "theta ## Here Mean and Sigma are the mean and var-cov matrix of", "this to initialise z (1, 2, 3, 4), based on the quantiles y_non_zero", "np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) &", "= Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero,", "/ tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state))", "X)) #### Now we want to implment a Gibbs sample where we update", "lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _,", 
"invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2) / (2 * tau_square_state)) ####", "tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state))", "z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an array of", ".... without prior on sigmas, so just taking mean for them if extreme_case:", "iteration successfully finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state))", "= theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state))", "np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError,", "(1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0,", "if extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else:", "(edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] =", "scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step", "= np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5,", "y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t,", "loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z possible_z", "step) prob_z[ind_z] = loglikelihood_z(possible_z) 
#[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs in", "scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in", "Lists to store the samples Theta, Z, Lambda, Nu, Tau, Eta = [],", "y en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)]", "Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood:", "Simulated data if extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y,", "for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44,", "Improved sampling code, with horseshoe prior \"\"\" import copy from scipy.stats import invgamma", "Sample theta using Elliptic Slice Sampler #### if extreme_case: # define conditional likelihood", "nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z", "Lambda, Nu, Tau, Eta = [], [], [], [], [], [] # Extract", "+ 1) / 2), scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2)", "Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood for z loglikelihood_z", "z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an array of 0, 1 theta_state", "= possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step", "non-zero y, get distribution of rainfalls and calculate quantiles #### Then use this", "lambda z: 
cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define conditional likelihood for theta loglikelihood_theta", "= Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z,", "0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu,", "+ (theta_state ** 2) / (2 * tau_square_state)) #### Step 4: Sample tau", "the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1]", "= np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0,", "0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) &", "to initialise z (1, 2, 3, 4), based on the quantiles y_non_zero =", "This is wrong - include the prior in z inside the loglikelihood (final", "implment a Gibbs sample where we update theta and z one after another", "conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update", "Slice Sampler #### if extreme_case: # define conditional likelihood for theta loglikelihood_theta =", "= invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2) / (2 * tau_square_state))", "#### For non-zero y, get distribution of rainfalls and calculate quantiles #### Then", "invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step 6: Sample eta ### eta_state", "timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import Parallel, delayed year = 2000", "bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize", "np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if 
extreme_case: z,", "distribution of rainfalls and calculate quantiles #### Then use this to initialise z", "Sherman's paper .... without prior on sigmas, so just taking mean for them", "the present state of the variables to sample .. #### theta_state = copy.deepcopy(Theta[-1])", "+ 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array", "#### This is wrong - include the prior in z inside the loglikelihood", "= Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z,", "Now we want to implment a Gibbs sample where we update theta and", "lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state =", "ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This is wrong -", "scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45,", "tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state + 0.5 *", "& (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state", "prior in z inside the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return", "np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1)", "another # number of steps Gibbs we want to use n_step_Gibbs = 1", "copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while", "first initialize theta and z for a Markov chain ## #### For non-zero", "6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th", "np.quantile(y_non_zero, 0.25) edge2 = 
np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y)", "Elliptic Slice Sampler #### if extreme_case: # define conditional likelihood for theta loglikelihood_theta", "0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors", "phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape)", "steps Gibbs we want to use n_step_Gibbs = 1 ### Lists to store", "np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu =", "prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z)", "(Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3]", "quantiles #### Then use this to initialise z (1, 2, 3, 4), based", "= 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y =", "+ (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1)", "= np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets", "try: #### Step 1: Sample theta using Elliptic Slice Sampler #### if extreme_case:", "true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0,", "0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from prior", "_, _ = 
cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _, _", "theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state", "loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define conditional likelihood for", "= lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z possible_z =", "eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample", "on the quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero,", "Y, X) # Sample/Update theta ## Here Mean and Sigma are the mean", "year = 2000 #For now, we're focusing on a single year extreme_case =", "= np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z /", "(Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state =", "to use n_step_Gibbs = 1 ### Lists to store the samples Theta, Z,", "if extreme_case: # define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state,", "prior \"\"\" import copy from scipy.stats import invgamma as invgamma import numpy as", "Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional", "np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta =", "import cptimeseries_extreme import sys from joblib import Parallel, delayed year = 2000 #For", "numpy as np from scipy.stats import gamma, multivariate_normal import pylab as plt from", "4), based on the quantiles y_non_zero 
= Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2", "[] # Extract zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero = (Y==0)", "Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y:", "Parallel, delayed year = 2000 #For now, we're focusing on a single year", "prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in", "0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ##", "continue break #### Step 3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 /", "prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices]))", "gamma, multivariate_normal import pylab as plt from Sampler import EllipticalSliceSampling from timeseries_cp import", "Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non]", "1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,))))", "for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta,", "= copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state)", "Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2)", "print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) 
print(cptimeseries(true_theta).loglikelihood(z, y,", "sampling code, with horseshoe prior \"\"\" import copy from scipy.stats import invgamma as", "now, we're focusing on a single year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\"", "Y, X) # Step 2: Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices,", "z one after another # number of steps Gibbs we want to use", "2, 3, 4), based on the quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero,", "the priors from Sherman's paper .... without prior on sigmas, so just taking", "/ (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state))", "#### Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 /", "/ eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5:", "0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46,", "X)) else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) ####", "= np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero]", "np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1,", "possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step 3:", "likelihood for theta loglikelihood_theta = lambda theta: 
cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta", "import gamma, multivariate_normal import pylab as plt from Sampler import EllipticalSliceSampling from timeseries_cp", "cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z,", "= (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta", "np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] =", "0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from", "'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year))", "wrong - include the prior in z inside the loglikelihood (final step) prob_z[ind_z]", "y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 =", "lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2) /", "state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state", "z (1, 2, 3, 4), based on the quantiles y_non_zero = Y[Y>0] edge1", "= cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to implment a Gibbs", "indices of y en = np.arange(len(Y)) bool_y_zero = 
(Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices", "taking mean for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0,", "/ nu_array_state) + (theta_state ** 2) / (2 * tau_square_state)) #### Step 4:", "np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu,", "n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood for z", "- include the prior in z inside the loglikelihood (final step) prob_z[ind_z] =", "z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z", "= np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta", "= invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state =", "invgamma import numpy as np from scipy.stats import gamma, multivariate_normal import pylab as", "(lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1", "use this to initialise z (1, 2, 3, 4), based on the quantiles", "0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from prior to define", "priors from Sherman's paper .... 
without prior on sigmas, so just taking mean", "= 1 ### Lists to store the samples Theta, Z, Lambda, Nu, Tau,", "0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0,", "0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from prior to", "the samples Theta, Z, Lambda, Nu, Tau, Eta = [], [], [], [],", "Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X)))", "= cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X)", "EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood for", "2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1", "n_step_Gibbs = 1 ### Lists to store the samples Theta, Z, Lambda, Nu,", "prior. 
## f_0 defines the present state of the Markov chain Samples =", "0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda,", "successfully finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state))", "scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda,", "scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state + 0.5", "0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 =", "True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall", "from Sherman's paper .... 
without prior on sigmas, so just taking mean for", "for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define conditional", "Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z +", "#z_state an array of 0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state", "stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood:", "Multivariate normal used as the prior. ## f_0 defines the present state of", "#### Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2),", "( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state))", "= np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4", "ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z =", "Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state))", "= np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 =", "0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\", "X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = 
np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the", "2 z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an array", "z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices]", "= (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3", "0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta =", "copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step 1: Sample", "theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0,", "z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z, y,", "the prior. ## f_0 defines the present state of the Markov chain Samples", "print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state))", "Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z", "else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now", "for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of the variables", "theta using Elliptic Slice Sampler #### if extreme_case: # define conditional likelihood for", "conditional likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step", "of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state =", "[], [], 
[], [], [] # Extract zero/non-zero indices of y en =", "Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration", "pylab as plt from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme", "Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z):", "-0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling", "For non-zero y, get distribution of rainfalls and calculate quantiles #### Then use", "= (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) &", "else: # define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y,", "#print(prob_z) finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices],", "based on the quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 =", "3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an array of 0, 1", "update theta and z one after another # number of steps Gibbs we", "def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This is", "and z for a Markov chain ## #### For non-zero y, get distribution", "import invgamma as invgamma import numpy as np from scipy.stats import gamma, multivariate_normal", "scale=1 + (1 / lambda_square_array_state)) #### Step 6: Sample eta ### eta_state =", "lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state ** 2) / (2 *", "2), scale=(1 / eta_state + 0.5 * ( 
np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) ####", "want to implment a Gibbs sample where we update theta and z one", "(Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta and", "= 0 #z_state an array of 0, 1 theta_state = theta_0 lambda_square_array_state =", "= lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define conditional likelihood for theta", "gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta =", "X) else: # define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state,", "Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1", "sample where we update theta and z one after another # number of", "np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y)", "np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state))", "want to use n_step_Gibbs = 1 ### Lists to store the samples Theta,", "z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state", "10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError,", "\\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0,", "Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 = 
np.quantile(y_non_zero, 0.75)", "lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1", "beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,),", "= np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y)", "the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1", "& (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2", "np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y, lambda_t, _, _", "the mean and var-cov matrix of Multivariate normal used as the prior. ##", "* ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state", "eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 /", "nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step 6:", "prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs):", "# Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape)", "## # Sampling from prior to define a true_theta beta_lambda, beta_mu, beta_omega =", "Step 3: Sample lambda_aray ### lambda_square_array_state = 
invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) + (theta_state", "[], [] # Extract zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero =", "& (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4] =", "(RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step 3: Sample lambda_aray", "Extract zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices =", "bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] =", "Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This", "## Lets first initialize theta and z for a Markov chain ## ####", "= np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4,", "from scipy.stats import invgamma as invgamma import numpy as np from scipy.stats import", "loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean", "np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45,", "1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1", "cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma are the", "Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y,", "= invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state", "for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ##", "= copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) 
tau_square_state = copy.deepcopy(Tau[-1]) eta_state =", "Eta = [], [], [], [], [], [] # Extract zero/non-zero indices of", "np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0,", "eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored", "prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present", "= theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0,", "present state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state)", "store the samples Theta, Z, Lambda, Nu, Tau, Eta = [], [], [],", "[], [], [] # Extract zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero", "finite_indices = np.isfinite(prob_z) prob_z = np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z", "prior on sigmas, so just taking mean for them if extreme_case: theta_0 =", "Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y,", "for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z) prob_z", "Markov chain ## #### For non-zero y, get distribution of rainfalls and calculate", "of y en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices =", "np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break", "scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add", "0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), 
loc=[1.44, 0, 0, 0, 0, 0], scale=1/6),", "#### if extreme_case: # define conditional likelihood for theta loglikelihood_theta = lambda theta:", "finished' ) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state))", "on sigmas, so just taking mean for them if extreme_case: theta_0 = np.concatenate(([-0.46,", "## #### For non-zero y, get distribution of rainfalls and calculate quantiles ####", "else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0,", "sys from joblib import Parallel, delayed year = 2000 #For now, we're focusing", "## Here Mean and Sigma are the mean and var-cov matrix of Multivariate", "edge2 = np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 =", "4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 /", "np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step 1: Sample theta using Elliptic", "## f_0 defines the present state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta,", "0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,)))))", "Sample/Update theta ## Here Mean and Sigma are the mean and var-cov matrix", "array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y,", "scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,),", "np.ones(shape=Y.shape) z_state[bin_2] = 2 
z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0", "y, X)) else: z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X))", "tau_square_state) while True: try: #### Step 1: Sample theta using Elliptic Slice Sampler", "(Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4] = 4", "= loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs)", "f_0 defines the present state of the Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1,", "= copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state =", "theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here", "# Sampling from prior to define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,),", "lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma", "phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) ####", "theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma are", "/ (lambda_square_array_state))))) #### Step 5: Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 +", "#[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy", "0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2)", "cptimeseries_extreme import sys from joblib import Parallel, delayed year = 
2000 #For now,", "np from scipy.stats import gamma, multivariate_normal import pylab as plt from Sampler import", "edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3", "define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) #", "without prior on sigmas, so just taking mean for them if extreme_case: theta_0", "0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\", "#### Step 3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1 / nu_array_state) +", "theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 /", "import sys from joblib import Parallel, delayed year = 2000 #For now, we're", "in z inside the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z", "- np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except", "true_theta = np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]:", "rainfalls and calculate quantiles #### Then use this to initialise z (1, 2,", "the prior in z inside the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1]))", "for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update", "#### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1])", "priors ## # Sampling from prior to define a true_theta beta_lambda, beta_mu, beta_omega", 
"np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda, beta_mu, beta_omega,", "= np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y, lambda_t, _,", "possible_z[ind_non] = ind_z + 1 #### This is wrong - include the prior", "copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1])", "likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define", "range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of the variables to sample ..", "true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6),", "np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first", "them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0,", "0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta", "array in [beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array])", "inside the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc =", "z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z", "to implment a Gibbs sample where we update theta and z one after", "[], [], [], [] # Extract zero/non-zero indices of y en = np.arange(len(Y))", "eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step", "from scipy.stats import gamma, multivariate_normal 
import pylab as plt from Sampler import EllipticalSliceSampling", "present state of the variables to sample .. #### theta_state = copy.deepcopy(Theta[-1]) z_state", "perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state", "single year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X =", "loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices = np.isfinite(prob_z)", "z, y, lambda_t, _, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we", "return prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the", "= np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's paper .... 
without prior", "= copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try:", "eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully", "invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state +", "loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean", "Parallel Case def parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 ####", "from prior to define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0,", "gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([])", "= ind_z + 1 #### This is wrong - include the prior in", "(final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs", "parallel_indices(ind_non, ind_z, possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This is wrong", "0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0],", "0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0],", "np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from", "Lets first initialize theta and z for a Markov chain ## #### For", "on a single year extreme_case = True location = 
'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields", "from joblib import Parallel, delayed year = 2000 #For now, we're focusing on", "Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0)", "[], [], [], [], [], [] # Extract zero/non-zero indices of y en", "mean and var-cov matrix of Multivariate normal used as the prior. ## f_0", "loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,),", "** 2) / (2 * tau_square_state)) #### Step 4: Sample tau ### tau_square_state", "0], np.zeros(shape=(20,)))) ## Realistic priors ## # Sampling from prior to define a", "ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of the variables to", "scipy.stats import invgamma as invgamma import numpy as np from scipy.stats import gamma,", "lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0", "0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0, 0, 0,", "year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year))", "0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state))", "ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices", "0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,),", "where we update theta and z one after 
another # number of steps", "copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1])", "Tau, Eta = [], [], [], [], [], [] # Extract zero/non-zero indices", "except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step 3: Sample", "of steps Gibbs we want to use n_step_Gibbs = 1 ### Lists to", "2000 #For now, we're focusing on a single year extreme_case = True location", "prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z =", "2), scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #", "= z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9)", "0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ##", "= np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for", "matrix of Multivariate normal used as the prior. 
## f_0 defines the present", "* ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state))", "ind_z + 1 #### This is wrong - include the prior in z", "# define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X)", "(1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y, lambda_t, _, _ =", "Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non, ind_z, possible_z,", "size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z,", "variables to sample .. 
#### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state =", "chain ## #### For non-zero y, get distribution of rainfalls and calculate quantiles", "Sampling from prior to define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46,", "print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to implment a Gibbs sample where", "0, 0, 0, 0, 1.44, 0, 0, 0, 0, 0, -0.45, 0, 0,", "edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4", "1 ### Lists to store the samples Theta, Z, Lambda, Nu, Tau, Eta", "X) # Step 2: Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices)))", "quantiles y_non_zero = Y[Y>0] edge1 = np.quantile(y_non_zero, 0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3", "lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma", "0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic priors ## #", "Here Mean and Sigma are the mean and var-cov matrix of Multivariate normal", "Sampler #### if extreme_case: # define conditional likelihood for theta loglikelihood_theta = lambda", "conditional likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: #", "zero/non-zero indices of y en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero]", "Nu, Tau, Eta = [], [], [], [], [], [] # Extract zero/non-zero", "tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True:", "import pylab as plt from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from", "#### Then use this to initialise z (1, 2, 3, 4), based on", "z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define 
conditional likelihood for theta loglikelihood_theta =", "using Elliptic Slice Sampler #### if extreme_case: # define conditional likelihood for theta", "= lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and", "(2 * tau_square_state)) #### Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] +", "tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state +", "possible_z, loglikelihood_z)\\ for ind_z in range(9)) prob_z = np.sum(prob_z, axis=0) #print(prob_z) finite_indices =", "extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0, 0,", "Defining the priors from Sherman's paper .... without prior on sigmas, so just", "np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0,", "= (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4) z_state = np.ones(shape=Y.shape) z_state[bin_2]", "to sample .. 
#### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1])", "= copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try: #### Step 1:", "to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample", "extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) #", "np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 =", "Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) #####", "as invgamma import numpy as np from scipy.stats import gamma, multivariate_normal import pylab", "timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import Parallel,", "theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here", "np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0,", "4 z_state[zero_y_indices] = 0 #z_state an array of 0, 1 theta_state = theta_0", "used as the prior. 
## f_0 defines the present state of the Markov", "define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0,", "invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state,", "for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z,", "0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0", "samples Theta, Z, Lambda, Nu, Tau, Eta = [], [], [], [], [],", "stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel Case def parallel_indices(ind_non,", "0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 =", "X) # Sample/Update theta ## Here Mean and Sigma are the mean and", "#### Step 6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1 /", "True: try: #### Step 1: Sample theta using Elliptic Slice Sampler #### if", "paper .... without prior on sigmas, so just taking mean for them if", "import Parallel, delayed year = 2000 #For now, we're focusing on a single", "print(Y.shape) ##### Defining the priors from Sherman's paper .... 
without prior on sigmas,", "Sigma are the mean and var-cov matrix of Multivariate normal used as the", "2) / (2 * tau_square_state)) #### Step 4: Sample tau ### tau_square_state =", "scale=(1 / nu_array_state) + (theta_state ** 2) / (2 * tau_square_state)) #### Step", "= np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case:", "if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44, 0, 0,", "en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta and z for a", ".. #### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state =", "* tau_square_state) while True: try: #### Step 1: Sample theta using Elliptic Slice", "Step 2: Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero", "of rainfalls and calculate quantiles #### Then use this to initialise z (1,", "-0.45, 0, 0, 0, 0, 0], np.zeros(shape=(32,)))) true_theta = theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)),", "extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X) print(cptimeseries_extreme(true_theta).loglikelihood(z, y, X)) else: z,", "z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z", "EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib", "1) / 2), scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) /", "ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z = Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero, ind_z, possible_z, loglikelihood_z)\\", "* tau_square_state)) 
#### Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1)", "_, _ = cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to implment", "theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and Sigma are", "one after another # number of steps Gibbs we want to use n_step_Gibbs", "for a Markov chain ## #### For non-zero y, get distribution of rainfalls", "0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\", "Y, X) else: # define conditional likelihood for theta loglikelihood_theta = lambda theta:", "Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood for z loglikelihood_z =", "f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda", "### Lists to store the samples Theta, Z, Lambda, Nu, Tau, Eta =", "scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] +", "/ (2 * tau_square_state)) #### Step 4: Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0]", "a Markov chain ## #### For non-zero y, get distribution of rainfalls and", "loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This is wrong - include the", "+ 1 #### This is wrong - include the prior in z inside", "fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining", "copy from scipy.stats import invgamma as invgamma import numpy as np from scipy.stats", "5: Sample 
nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) ####", "beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\", "zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta and z", "0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0, 0], scale=1/6), \\", "loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc = 0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) #####", "= np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError,", "while True: try: #### Step 1: Sample theta using Elliptic Slice Sampler ####", "0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,),", "Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else: print(str(ind_Gibbs)+'-st/th", "possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z =", "Markov chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] #", "= 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an array of 0,", "chain Samples = EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define", "= 4 z_state[zero_y_indices] = 0 #z_state an array of 0, 1 theta_state 
=", "0 #z_state an array of 0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0)", "\\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu", "we want to use n_step_Gibbs = 1 ### Lists to store the samples", "an array of 0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state =", "invgamma as invgamma import numpy as np from scipy.stats import gamma, multivariate_normal import", "copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state * tau_square_state) while True: try: ####", "0.1 for ind_Gibbs in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of the", "z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z possible_z = z_state nonzero_y", "+ 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) # Add to stored samples", "invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0]", "(1 / lambda_square_array_state)) #### Step 6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1", "and calculate quantiles #### Then use this to initialise z (1, 2, 3,", "### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state + 0.5", "1: Sample theta using Elliptic Slice Sampler #### if extreme_case: # define conditional", "to define a true_theta beta_lambda, beta_mu, beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0,", "fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's paper ....", "0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[-0.45, 0, 0, 0, 0,", "scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ 
np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda, beta_mu,", "(theta_state ** 2) / (2 * tau_square_state)) #### Step 4: Sample tau ###", "use n_step_Gibbs = 1 ### Lists to store the samples Theta, Z, Lambda,", "+ (1 / lambda_square_array_state)) #### Step 6: Sample eta ### eta_state = invgamma.rvs(a=1,", "0.25) edge2 = np.quantile(y_non_zero, 0.5) edge3 = np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2", "in range(n_step_Gibbs): #print(ind_Gibbs) ##### Copy the present state of the variables to sample", "cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import Parallel, delayed year", "invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add", "Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else:", "### eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished'", "Step 1: Sample theta using Elliptic Slice Sampler #### if extreme_case: # define", "theta_0 Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0,", "# Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's", "scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)) true_theta = np.array([]) for array in [beta_lambda, beta_mu, beta_omega, phi_lambda,", "= 2000 #For now, we're focusing on a single year extreme_case = 
True", "we update theta and z one after another # number of steps Gibbs", "Gibbs sample where we update theta and z one after another # number", "(lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) ####", "= lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X) # Sample/Update theta ## Here Mean and", "sigmas, so just taking mean for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0,", "= np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors", "joblib import Parallel, delayed year = 2000 #For now, we're focusing on a", "= np.quantile(y_non_zero, 0.75) edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 =", "Copy the present state of the variables to sample .. 
#### theta_state =", "ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue break #### Step 3: Sample lambda_aray ###", "1 #### This is wrong - include the prior in z inside the", "## Realistic priors ## # Sampling from prior to define a true_theta beta_lambda,", "scipy.stats import gamma, multivariate_normal import pylab as plt from Sampler import EllipticalSliceSampling from", "number of steps Gibbs we want to use n_step_Gibbs = 1 ### Lists", "/ lambda_square_array_state)) eta_state = invgamma.rvs(a=0.5, scale=1) tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2),", "to store the samples Theta, Z, Lambda, Nu, Tau, Eta = [], [],", "ZeroDivisionError, OSError): continue break #### Step 3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]),", "/ 2), scale=(1 / eta_state + 0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state)))))", "Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y,", "= EllipticalSliceSampling(LHD=loglikelihood_theta, n=1, Mean=theta_0, Sigma=Sigma_0, f_0=theta_state) theta_state = Samples[-1] # define conditional likelihood", "2: Sample/Update z possible_z = z_state nonzero_y = np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in", "edge4 = np.max(Y) bin_2 = (edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3)", "import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from", "is wrong - include the prior in z inside the loglikelihood (final step)", "Y = 
np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's paper .... without", "copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state", "Sample nu_array ### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step", "beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)),", "+ (1 / tau_square_state)) print(str(ind_Gibbs)+'-st/th iteration successfully finished' ) # Add to stored", "Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else: print(str(ind_Gibbs)+'-st/th sample", "gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data", "we're focusing on a single year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" #", "np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's paper .... 
without prior on", "Rain fall Y = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\Rainfall_Cardiff_{}.npy'.format(year)) print(Y.shape) ##### Defining the priors from Sherman's paper", "0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0, 0,", "0, 0, 0, 0, -0.45, 0, 0, 0, 0, 0], np.zeros(shape=(20,)))) ## Realistic", "lambda_square_array_state)) #### Step 6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1", "code, with horseshoe prior \"\"\" import copy from scipy.stats import invgamma as invgamma", "Sample tau ### tau_square_state = invgamma.rvs(a=((lambda_square_array_state.shape[0] + 1) / 2), scale=(1 / eta_state", "Realistic priors ## # Sampling from prior to define a true_theta beta_lambda, beta_mu,", "true_theta = np.concatenate([true_theta, array]) Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if", "= True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model fields X = np.load('C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\Data\\\\Data\\\\model_fields_Cardiff_{}.npy'.format(year)) # Rain", "[beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0 =", "of 0, 1 theta_state = theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1", "theta_state = Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda z:", "theta and z for a Markov chain ## #### For non-zero y, get", "z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X) else: # define conditional likelihood", "= np.ones(shape=Y.shape) z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] =", 
"cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2: Sample/Update z possible_z = z_state nonzero_y =", "0, 0, 0, 0], scale=1/6) phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,),", "0.5 * ( np.sum(np.power(theta_state, 2) / (lambda_square_array_state))))) #### Step 5: Sample nu_array ###", "z inside the loglikelihood (final step) prob_z[ind_z] = loglikelihood_z(possible_z) #[0]*np.log(np.random.poisson(loglikelihood_z(possible_z)[1])) return prob_z perc", "= np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state =", "(edge1<=Y) & (Y<=edge2) bin_3 = (edge2<Y) & (Y<=edge3) bin_4 = (edge3<Y) & (Y<=edge4)", "initialize theta and z for a Markov chain ## #### For non-zero y,", "with horseshoe prior \"\"\" import copy from scipy.stats import invgamma as invgamma import", "Then use this to initialise z (1, 2, 3, 4), based on the", "2) / (lambda_square_array_state))))) # Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state))", "= Samples[-1] # define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z,", "normal used as the prior. 
## f_0 defines the present state of the", "z_state[bin_2] = 2 z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state", "Sigma_0 = np.diag(np.concatenate(((1/6)*np.ones(shape=(18,)), (1/(1.3*65))*np.ones(shape=(20,))))) print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y, lambda_t,", "focusing on a single year extreme_case = True location = 'C:\\\\Users\\\\klera\\\\Documents\\\\GitHub\\\\ML_Extreme_Climate_Events\\\\code\\\\images\\\\year_'+str(year)+\"\\\\\" # Model", "loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0, 0,", "import numpy as np from scipy.stats import gamma, multivariate_normal import pylab as plt", "sample .. #### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state = copy.deepcopy(Lambda[-1]) nu_array_state", "phi_lambda, phi_mu, gamma_lambda, gamma_mu = np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65)),\\ np.random.normal(size=(5,), scale=1/(1.3*65))", "# define conditional likelihood for z loglikelihood_z = lambda z: cptimeseries_extreme(theta_state).loglikelihood(z, Y, X)", "p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError):", "# Sample/Update theta ## Here Mean and Sigma are the mean and var-cov", "from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import", "beta_omega = np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44,", "possible_z, loglikelihood_z): possible_z[ind_non] = ind_z + 1 #### This is wrong - include", "np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z except (RuntimeError,", 
"nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 = np.diag(lambda_square_array_state *", "multivariate_normal import pylab as plt from Sampler import EllipticalSliceSampling from timeseries_cp import cptimeseries", "likelihood for z loglikelihood_z = lambda z: cptimeseries(theta_state).loglikelihood(z, Y, X) # Step 2:", "/ np.sum(prob_z)) z_state = possible_z except (RuntimeError, ValueError, TypeError, NameError, ZeroDivisionError, OSError): continue", "= np.diag(np.concatenate(((1/6)*np.ones(shape=(30,)), (1/(1.3*65))*np.ones(shape=(20,))))) else: theta_0 = np.concatenate(([-0.46, 0, 0, 0, 0, 0, 1.44,", "# define conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries(theta).loglikelihood(z_state, Y, X)", "of the variables to sample .. #### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1])", "and Sigma are the mean and var-cov matrix of Multivariate normal used as", "horseshoe prior \"\"\" import copy from scipy.stats import invgamma as invgamma import numpy", "y, get distribution of rainfalls and calculate quantiles #### Then use this to", "theta_0 lambda_square_array_state = np.diag(Sigma_0) nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) eta_state", "as np from scipy.stats import gamma, multivariate_normal import pylab as plt from Sampler", "### nu_array_state = invgamma.rvs(a=1, scale=1 + (1 / lambda_square_array_state)) #### Step 6: Sample", "print(np.diag(Sigma_0).shape) #### Simulated data if extreme_case: z, y, lambda_t, _, _ = cptimeseries_extreme(true_theta).simulate(X)", "en = np.arange(len(Y)) bool_y_zero = (Y==0) zero_y_indices = en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ##", "theta and z one after another # number of steps Gibbs we want", "np.random.choice(nonzero_y_indices, size=int(perc*len(nonzero_y_indices))) for ind_nonzero in nonzero_y: prob_z = np.zeros(9) prob_z = 
Parallel(n_jobs=4, prefer=\"threads\")(delayed(parallel_indices)(ind_nonzero,", "= np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0,", "= en[bool_y_zero] nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta and z for", "= 2 z_state[bin_3] = 3 z_state[bin_4] = 4 z_state[zero_y_indices] = 0 #z_state an", "so just taking mean for them if extreme_case: theta_0 = np.concatenate(([-0.46, 0, 0,", "the variables to sample .. #### theta_state = copy.deepcopy(Theta[-1]) z_state = copy.deepcopy(Z[-1]) lambda_square_array_state", "calculate quantiles #### Then use this to initialise z (1, 2, 3, 4),", "if extreme_case: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries_extreme(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X))) else: print(str(ind_Gibbs)+'-st/th sample LogLikeliHood: '+str(cptimeseries(Theta[ind_Gibbs]).loglikelihood(Z[ind_Gibbs],Y, X)))", "= copy.deepcopy(Lambda[-1]) nu_array_state = copy.deepcopy(Nu[-1]) tau_square_state = copy.deepcopy(Tau[-1]) eta_state = copy.deepcopy(Eta[-1]) Sigma_0 =", "\"\"\" Improved sampling code, with horseshoe prior \"\"\" import copy from scipy.stats import", "# Add to stored samples Theta.append(copy.deepcopy(theta_state)) Z.append(copy.deepcopy(z_state)) Lambda.append(copy.deepcopy(lambda_square_array_state)) Nu.append(copy.deepcopy(nu_array_state)) Tau.append(copy.deepcopy(tau_square_state)) Eta.append(copy.deepcopy(eta_state)) #### Parallel", "np.exp(prob_z[finite_indices] - np.min(prob_z[finite_indices])) possible_z[ind_nonzero] = np.random.choice(a=np.arange(1, 10)[finite_indices], p=prob_z / np.sum(prob_z)) z_state = possible_z", "from timeseries_cp import cptimeseries from timeseries_cp_extreme import cptimeseries_extreme import sys from joblib import", "OSError): continue break #### Step 3: Sample lambda_aray ### lambda_square_array_state = invgamma.rvs(a=np.ones(lambda_square_array_state.shape[0]), scale=(1", "in 
[beta_lambda, beta_mu, beta_omega, phi_lambda, phi_mu, gamma_lambda, gamma_mu]: true_theta = np.concatenate([true_theta, array]) Sigma_0", "np.random.normal(size=(6,), loc=[-0.46, 0, 0, 0, 0, 0], scale=1/6), \\ np.random.normal(size=(6,), loc=[1.44, 0, 0,", "conditional likelihood for theta loglikelihood_theta = lambda theta: cptimeseries_extreme(theta).loglikelihood(z_state, Y, X) # Sample/Update", "Step 6: Sample eta ### eta_state = invgamma.rvs(a=1, scale=1 + (1 / tau_square_state))", "nonzero_y_indices = en[np.invert(bool_y_zero)] ## Lets first initialize theta and z for a Markov", "cptimeseries(true_theta).simulate(X) print(cptimeseries(true_theta).loglikelihood(z, y, X)) #### Now we want to implment a Gibbs sample" ]
[ "class Solution: def climbStairs(self, n: int) -> int: # 不管是走几个台阶. # 要么走1步, 要么走2步", "-> 1 # x=2 -> 2 if n == 1: return 1 if", "2 idx = 3 while(idx <= n): # print(idx) dp[idx] = dp[idx-1] +", "[70] Climbing Stairs # # @lc code=start class Solution: def climbStairs(self, n: int)", "dp[1] = 1 dp[2] = 2 idx = 3 while(idx <= n): #", "# # @lc app=leetcode id=70 lang=python3 # # [70] Climbing Stairs # #", "<= n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx += 1 return", "dp[2] = 2 idx = 3 while(idx <= n): # print(idx) dp[idx] =", "if n == 2: return 2 dp = [0]*(n+1) dp[1] = 1 dp[2]", "-> 2 if n == 1: return 1 if n == 2: return", "要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和", "= 3 while(idx <= n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx", "def climbStairs(self, n: int) -> int: # 不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶,", "# print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx += 1 return dp[n] def", "@lc app=leetcode id=70 lang=python3 # # [70] Climbing Stairs # # @lc code=start", "# [70] Climbing Stairs # # @lc code=start class Solution: def climbStairs(self, n:", "# 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题", "Climbing Stairs # # @lc code=start class Solution: def climbStairs(self, n: int) ->", "# 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 # x=2 ->", "= 2 idx = 3 while(idx <= n): # print(idx) dp[idx] = dp[idx-1]", "阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 # x=2 -> 2 if n", "Solution: def climbStairs(self, n: int) -> int: # 不管是走几个台阶. # 要么走1步, 要么走2步 #", "== 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol = Solution() sol.test() #", "print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx += 1 return dp[n] def test(self):", "app=leetcode id=70 lang=python3 # # [70] Climbing Stairs # # @lc code=start class", "从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1", "climbStairs(self, n: int) -> int: # 不管是走几个台阶. 
# 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况", "1 return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) ==", "idx = 3 while(idx <= n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2]", "n == 2: return 2 dp = [0]*(n+1) dp[1] = 1 dp[2] =", "dp[idx] = dp[idx-1] + dp[idx-2] idx += 1 return dp[n] def test(self): assert(self.climbStairs(2)", "int) -> int: # 不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步,", "-> int: # 不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者,", "1 # x=2 -> 2 if n == 1: return 1 if n", "dp = [0]*(n+1) dp[1] = 1 dp[2] = 2 idx = 3 while(idx", "return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5)", "1 if n == 2: return 2 dp = [0]*(n+1) dp[1] = 1", "lang=python3 # # [70] Climbing Stairs # # @lc code=start class Solution: def", "2: return 2 dp = [0]*(n+1) dp[1] = 1 dp[2] = 2 idx", "只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1", "== 2: return 2 dp = [0]*(n+1) dp[1] = 1 dp[2] = 2", "= dp[idx-1] + dp[idx-2] idx += 1 return dp[n] def test(self): assert(self.climbStairs(2) ==", "= 1 dp[2] = 2 idx = 3 while(idx <= n): # print(idx)", "那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 #", "return 2 dp = [0]*(n+1) dp[1] = 1 dp[2] = 2 idx =", "# 不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 #", "从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 # x=2", "x=1 -> 1 # x=2 -> 2 if n == 1: return 1", "2 if n == 1: return 1 if n == 2: return 2", "Stairs # # @lc code=start class Solution: def climbStairs(self, n: int) -> int:", "n == 1: return 1 if n == 2: return 2 dp =", "[0]*(n+1) dp[1] = 1 dp[2] = 2 idx = 3 while(idx <= n):", "int: # 不管是走几个台阶. 
# 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步", "dp[idx-1] + dp[idx-2] idx += 1 return dp[n] def test(self): assert(self.climbStairs(2) == 2)", "n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 # x=2 -> 2 if", "n: int) -> int: # 不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 #", "或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 #", "n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx += 1 return dp[n]", "+ dp[idx-2] idx += 1 return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3)", "# x=2 -> 2 if n == 1: return 1 if n ==", "# 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 ->", "2 dp = [0]*(n+1) dp[1] = 1 dp[2] = 2 idx = 3", "3 while(idx <= n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx +=", "= [0]*(n+1) dp[1] = 1 dp[2] = 2 idx = 3 while(idx <=", "# # [70] Climbing Stairs # # @lc code=start class Solution: def climbStairs(self,", "类似汉诺塔问题 # x=1 -> 1 # x=2 -> 2 if n == 1:", "不管是走几个台阶. # 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第", "# # @lc code=start class Solution: def climbStairs(self, n: int) -> int: #", "return 1 if n == 2: return 2 dp = [0]*(n+1) dp[1] =", "# x=1 -> 1 # x=2 -> 2 if n == 1: return", "idx += 1 return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3)", "def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol =", "dp[idx-2] idx += 1 return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) ==", "while(idx <= n): # print(idx) dp[idx] = dp[idx-1] + dp[idx-2] idx += 1", "x=2 -> 2 if n == 1: return 1 if n == 2:", "id=70 lang=python3 # # [70] Climbing Stairs # # @lc code=start class Solution:", "dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol", "code=start class Solution: def climbStairs(self, n: int) -> int: # 不管是走几个台阶. 
# 要么走1步,", "assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol = Solution() sol.test() # @lc code=end", "# @lc app=leetcode id=70 lang=python3 # # [70] Climbing Stairs # # @lc", "assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol = Solution() sol.test()", "# 要么走1步, 要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n", "要么走2步 # 那么对于n级台阶, 只有两种情况 # 从n-1级走1步, 或者, 从n-2级台阶走2步 # 第 n 阶的情况只能是上面两种情况之和 #", "@lc code=start class Solution: def climbStairs(self, n: int) -> int: # 不管是走几个台阶. #", "1 dp[2] = 2 idx = 3 while(idx <= n): # print(idx) dp[idx]", "== 1: return 1 if n == 2: return 2 dp = [0]*(n+1)", "+= 1 return dp[n] def test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4)", "# 类似汉诺塔问题 # x=1 -> 1 # x=2 -> 2 if n ==", "# @lc code=start class Solution: def climbStairs(self, n: int) -> int: # 不管是走几个台阶.", "1: return 1 if n == 2: return 2 dp = [0]*(n+1) dp[1]", "test(self): assert(self.climbStairs(2) == 2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol = Solution()", "第 n 阶的情况只能是上面两种情况之和 # 类似汉诺塔问题 # x=1 -> 1 # x=2 -> 2", "if n == 1: return 1 if n == 2: return 2 dp", "2) assert(self.climbStairs(3) == 3) assert(self.climbStairs(4) == 5) sol = Solution() sol.test() # @lc" ]
[ "import datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [", "markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), ]", "'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'),", "__future__ import unicode_literals from django.db import models, migrations import datetime import markupfield.fields from", "unicode_literals from django.db import models, migrations import datetime import markupfield.fields from django.conf import", "datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown',", "utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime", "Text')])), ('amount', models.FloatField()), ('_description_rendered', 
models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled',", "models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True,", "b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={", "= [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)),", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id',", "'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3,", "[ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description',", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', 
markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30,", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])),", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations", "from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "from django.db import models, migrations import datetime import markupfield.fields from django.conf import settings", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),", "migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True,", "('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', 
models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ },", "b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered',", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models,", "(b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ],", "django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest',", "django.db import models, migrations import datetime import markupfield.fields from django.conf import settings class", "models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,),", "import unicode_literals from django.db import models, migrations import datetime import markupfield.fields from django.conf", "('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'),", "primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', 
models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'',", "('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'),", "operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100,", "(b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)),", "blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'),", "] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title',", "markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'),", "(b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR',", "auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True,", "serialize=False, auto_created=True, primary_key=True)), ('title', 
models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False,", "migrations import datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies =", "-*- from __future__ import unicode_literals from django.db import models, migrations import datetime import", "blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown',", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "(b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD',", "('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)),", "models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on',", "from __future__ import unicode_literals from django.db import models, migrations import datetime import markupfield.fields", "(b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', 
models.TextField(editable=False)), ('currency', models.CharField(default=b'USD',", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type',", "editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])),", "('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain',", "coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import", "import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "import models, migrations import datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration):", "choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user',", "blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount',", "('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', 
b'EUR')])), ('settled', models.BooleanField(default=False)),", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='PaymentRequest', fields=[ ('id', models.AutoField(verbose_name='ID',", "'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()), ('_description_rendered', models.TextField(editable=False)), ('currency',", "models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext',", "models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)), ('description_markup_type', models.CharField(default=b'markdown', max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html',", "models, migrations import datetime import markupfield.fields from django.conf import settings class Migration(migrations.Migration): dependencies", "max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)),", "('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now, null=True, blank=True)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ),", "max_length=30, editable=False, blank=True, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured", "choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('amount', models.FloatField()),", "name='PaymentRequest', fields=[ ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100, blank=True)), ('description', markupfield.fields.MarkupField(rendered_field=True, blank=True)),", "('currency', models.CharField(default=b'USD', max_length=3, choices=[(b'USD', b'USD'), (b'EUR', b'EUR')])), ('settled', models.BooleanField(default=False)), ('created_on', models.DateTimeField(auto_now_add=True)), ('paid_on', models.DateTimeField(default=datetime.datetime.now," ]
[ "assuming only one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser,", "result})) self.notebook.completion_info = None return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO", "try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if status", "self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO',", "session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser,", "else: outnode, execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell", "may be no cell if running a silent execution position = self.notebook.cell_position(cell) if", "editor browser connection expected\") self._editor = connection session = Session() class PeriodicOutputCallback(object): \"\"\"", "html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host)", "switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def", "if execution_count is None: # For silent execution before *any* output return #", "# If you hit reload in the browser, the CSS needs to be", "logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is None:", "position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta", "commands logging.info(u\"Received %s 
command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') ==", "the browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME", "import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\",", "httpserver from tornado import ioloop from tornado import websocket import os import sys", "session. \"\"\" def __init__(self): self._browser = None self._editor = None self.notebook = None", "def on_message(self, message): \"Websocket on_message handler. Tracks connection type.\" try: payload = json.loads(message)", "= {} def reset(self): self._browser = None self.editors = [] self.notebook = None", "self._browser = None self.editors = [] self.notebook = None self.buffers = {} @property", "expected\") self._browser = connection @property def editor(self): return self._editor @editor.setter def editor(self, connection):", "tornado import websocket import os import sys import json import webbrowser import nbformat", "to by the ThreadedExecutor. \"\"\" def __init__(self, server, period=20): self.server = server self.notebook", "start? self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback =", "notebook\") session.notebook.reload(self) # If you hit reload in the browser, the CSS needs", "opened notebook\") session.notebook.reload(self) # If you hit reload in the browser, the CSS", "not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None)", "websocket connection but to the whole server session. 
\"\"\" def __init__(self): self._browser =", "__init__(self, server, period=20): self.server = server self.notebook = None self.period = period def", "return if payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor = self", "without a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload", "session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should happen even without a", "(for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook", "start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue", "return # SOME COMMANDS (e.g mirroring) should happen even without a browser tab", "For silent execution before *any* output return # What about silent execution after", "session.editor = self return if payload.get('init', False) == 'browser': session.browser = self logging.info('Added", "server self.notebook = None self.period = period def switch_notebook(self, notebook): self.notebook = notebook", "self._browser @browser.setter def browser(self, connection): if self._browser is not None: logging.info(\"WARNING: Only one", "None) if notebook is None: # Create notebook # Note that there are", "SOME COMMANDS (e.g mirroring) should happen even without a browser tab open! 
self.toggle_notebook(payload['name'])", "False) == 'reload_page': # Reload over the browser connection (currently assuming only one)", "self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self)", "if payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor = self return", "import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global", "'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count =", "session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. Tracks connection type.\"", "session.notebook.reload(self) # If you hit reload in the browser, the CSS needs to", "cell is None: return # There may be no cell if running a", "result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if", "ThreadedExecutor. 
\"\"\" def __init__(self, server, period=20): self.server = server self.notebook = None self.period", "payload = json.loads(message) except Exception as e: logging.info('JSON parse exception: %s' % str(e))", "CSS needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring)", "= Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook =", "(r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED: Server", "== 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor': logging.info('Added editor client", "None self.notebook = None self.buffers = {} def reset(self): self._browser = None self.editors", "queue import Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH =", "start_delta = relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position", "reset(self): self._browser = None self.editors = [] self.notebook = None self.buffers = {}", "== 'editor': logging.info('Added editor client connection') session.editor = self return if payload.get('init', False)", "the browser connection (currently assuming only one) if session.browser is not None: session.notebook.reload(session.browser)", "is None: # For silent execution before *any* output return # What about", "Session(object): \"\"\" Global state of the server that doesn't belong to any single", "stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to by ThreadedExecutor\" try: val =", "os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class 
Session(object): \"\"\" Global state of the server", "result['cursor_start'] = position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result}))", "not None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor = connection session =", "Note that there are multiple connections and we want only one notebook! #", "logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs", "up a periodic callback to push output to cells by polling from the", "updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If you hit reload in", ".execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH =", "self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. Tracks connection type.\" try: payload =", "silent execution position = self.notebook.cell_position(cell) if execution_count is None: # For silent execution", "is None: return # There may be no cell if running a silent", "push output to cells by polling from the queue pushed to by the", "we want only one notebook! 
# (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\",", "payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received", "result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content':", "'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global state of the server that", "return elif session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata',", "return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received", "position = self.notebook.cell_position(cell) if execution_count is None: # For silent execution before *any*", "= Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to push output", "def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to by ThreadedExecutor\" try: val", "session.browser = self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) > 0:", "session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is", "def browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser is not None:", "try: payload = json.loads(message) except Exception as e: logging.info('JSON parse exception: %s' %", "There may be no cell if running a silent execution position = self.notebook.cell_position(cell)", "self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging", "the whole server session. 
\"\"\" def __init__(self): self._browser = None self._editor = None", "self._browser = connection @property def editor(self): return self._editor @editor.setter def editor(self, connection): if", "self.notebook.cell_position(cell) if execution_count is None: # For silent execution before *any* output return", "outnode, execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is", "connection type.\" try: payload = json.loads(message) except Exception as e: logging.info('JSON parse exception:", "result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name':", "# TODO: Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If you", "with previously opened notebook\") session.notebook.reload(self) # If you hit reload in the browser,", "None self.period = period def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback", "only one notebook! # (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name,", "ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook)", "execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue()", "COMMANDS (e.g mirroring) should happen even without a browser tab open! 
self.toggle_notebook(payload['name']) if", "connection (currently assuming only one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg", "editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor is not None: logging.info(\"WARNING:", "notebook = session.buffers.get(name, None) if notebook is None: # Create notebook # Note", "and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self):", "is not None: logging.info(\"WARNING: Only one browser connection expected\") self._browser = connection @property", "= relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position -", "@editor.setter def editor(self, connection): if self._editor is not None: logging.info(\"WARNING: Only editor browser", "\"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self,", "result['content']}, buffers=buffers) return else: outnode, execution_count = result, status if session.browser: cell =", "True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None self.output_callback.stop() def", "def editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor is not None:", "connection @property def editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor is", "parse exception: %s' % str(e)) return if 'cmd' in payload: if payload['cmd'] in", "editor client connection') session.editor = self return if payload.get('init', False) == 'browser': session.browser", "connection') if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart with", "None: # Create notebook # Note that there are multiple 
connections and we", "import json import webbrowser import nbformat from queue import Queue from .execute import", "to the whole server session. \"\"\" def __init__(self): self._browser = None self._editor =", "= None self.period = period def switch_notebook(self, notebook): self.notebook = notebook def start(self):", "tornado import httpserver from tornado import ioloop from tornado import websocket import os", "def reset(self): self._browser = None self.editors = [] self.notebook = None self.buffers =", "relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta = relative_position -", "but to the whole server session. \"\"\" def __init__(self): self._browser = None self._editor", "multiple connections and we want only one notebook! # (for now) notebook =", "else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init',", "COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data':", "self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path':", "return else: outnode, execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count) if", "connection): if self._editor is not None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor", "{'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED: Server started and", "self.notebook.find_cell(execution_count) if cell is None: return # There may be no cell if", "notebook is None: # Create notebook # Note that there are multiple 
connections", "def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start()", "__call__(self): \"Processes queue pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result,", "toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is None: # Create notebook", "'editor': logging.info('Added editor client connection') session.editor = self return if payload.get('init', False) ==", "self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if status == 'completion': position", "relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position - end_delta", "editor(self, connection): if self._editor is not None: logging.info(\"WARNING: Only editor browser connection expected\")", "= relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position -", "server that doesn't belong to any single websocket connection but to the whole", "notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self):", "connection): if self._browser is not None: logging.info(\"WARNING: Only one browser connection expected\") self._browser", "self.period = period def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback =", "the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g", "and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart with previously opened notebook\")", "TODO: Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If you hit", "now) notebook = ExecutableNotebook( 
(ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook", "except Exception as e: logging.info('JSON parse exception: %s' % str(e)) return if 'cmd'", "logging.info(\"WARNING: Only one browser connection expected\") self._browser = connection @property def editor(self): return", "'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result, status if session.browser: cell", "not None: logging.info(\"WARNING: Only one browser connection expected\") self._browser = connection @property def", "return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS:", "self.buffers = {} def reset(self): self._browser = None self.editors = [] self.notebook =", "cell if running a silent execution position = self.notebook.cell_position(cell) if execution_count is None:", "message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) ==", "# For silent execution before *any* output return # What about silent execution", "is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not", "metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result, status if session.browser:", "there are multiple connections and we want only one notebook! # (for now)", "logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If you hit reload in the", "in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" %", "about silent execution after start? 
self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue", "(status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', #", "TO OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g:", "buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg'", "notebook # Note that there are multiple connections and we want only one", "output return # What about silent execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode) class", "- result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion',", "silent execution before *any* output return # What about silent execution after start?", "serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port)", "= server self.notebook = None self.period = period def switch_notebook(self, notebook): self.notebook =", "css=None) return # SOME COMMANDS (e.g mirroring) should happen even without a browser", "execution before *any* output return # What about silent execution after start? 
self.notebook.update_cell_outputs(session.browser,", "notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self):", "= None self.notebook = None self.buffers = {} def reset(self): self._browser = None", "def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is None: # Create", "def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser", "% str(e)) return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose", "import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH", "{0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor':", "= result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type':", "== 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME:", "message): \"Websocket on_message handler. 
Tracks connection type.\" try: payload = json.loads(message) except Exception", "result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data':", "if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor': logging.info('Added", "from tornado import httpserver from tornado import ioloop from tornado import websocket import", "%s command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop()", "STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED: Server started and listening\")", "def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self,", "PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook", "payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor = self return if", "None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler,", "a periodic callback to push output to cells by polling from the queue", "except: return if status == 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] #", "session.buffers.get(name, None) if notebook is None: # Create notebook # Note that there", "browser connection expected\") 
self._browser = connection @property def editor(self): return self._editor @editor.setter def", "self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to by ThreadedExecutor\"", "'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant", "ioloop from tornado import websocket import os import sys import json import webbrowser", "self._editor = connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic", "json.loads(message) except Exception as e: logging.info('JSON parse exception: %s' % str(e)) return if", "__init__(self): self._browser = None self._editor = None self.notebook = None self.buffers = {}", "JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id':", "output to cells by polling from the queue pushed to by the ThreadedExecutor.", "ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class", "payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor': logging.info('Added editor", "connections and we want only one notebook! 
# (for now) notebook = ExecutableNotebook(", "= self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta =", "{} def reset(self): self._browser = None self.editors = [] self.notebook = None self.buffers", "import logging import tornado import tornado.web from tornado import httpserver from tornado import", "emacs point position start_delta = relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end']", "WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def", "session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return # There may be", "host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server =", "origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None", "def __init__(self, server, period=20): self.server = server self.notebook = None self.period = period", "self.server = server self.notebook = None self.period = period def switch_notebook(self, notebook): self.notebook", "session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart with previously opened", "tornado.web from tornado import httpserver from tornado import ioloop from tornado import websocket", "period def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period)", "None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and", "single websocket connection but to the 
whole server session. \"\"\" def __init__(self): self._browser", "be no cell if running a silent execution position = self.notebook.cell_position(cell) if execution_count", "import Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0],", "by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return", "not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True", "Tracks connection type.\" try: payload = json.loads(message) except Exception as e: logging.info('JSON parse", "if notebook is None: # Create notebook # Note that there are multiple", "server session. \"\"\" def __init__(self): self._browser = None self._editor = None self.notebook =", "# Create notebook # Note that there are multiple connections and we want", "websocket import os import sys import json import webbrowser import nbformat from queue", "open! 
self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over the browser connection", "import os import sys import json import webbrowser import nbformat from queue import", "name): notebook = session.buffers.get(name, None) if notebook is None: # Create notebook #", "connection') session.editor = self return if payload.get('init', False) == 'browser': session.browser = self", "== 'browser': session.browser = self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells)", "check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser =", "= ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook", "webbrowser import nbformat from queue import Queue from .execute import ThreadedExecutor from .cells", "json import webbrowser import nbformat from queue import Queue from .execute import ThreadedExecutor", "after start? 
self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback", "% result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', #", "execution_count is None: # For silent execution before *any* output return # What", "self._browser = None self._editor = None self.notebook = None self.buffers = {} def", "Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd')", "payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" % payload['cmd'])", "if self._browser is not None: logging.info(\"WARNING: Only one browser connection expected\") self._browser =", "def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes", "session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler =", "periodic callback to push output to cells by polling from the queue pushed", "return True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None self.output_callback.stop()", "{'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result,", "= session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor is not None):", "session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" %", "\"Processes queue pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status", "= 
PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if", "that there are multiple connections and we want only one notebook! # (for", "= position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info", "logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if payload.get('init', False)", "#!/usr/bin/env python import logging import tornado import tornado.web from tornado import httpserver from", "Exception as e: logging.info('JSON parse exception: %s' % str(e)) return if 'cmd' in", "'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return", "position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start()", "from queue import Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH", "None return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR", "logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server':", "browser connection expected\") self._editor = connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets", "is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if", "# SOME COMMANDS (e.g mirroring) should happen even without a browser tab open!", "= session.buffers.get(name, None) if notebook is None: # Create notebook # Note that", 
"'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module':", "self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to by ThreadedExecutor\" try:", "start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return", "'data': result})) self.notebook.completion_info = None return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST", "%s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc',", "if running a silent execution position = self.notebook.cell_position(cell) if execution_count is None: #", "Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name,", "self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is", "Reload over the browser connection (currently assuming only one) if session.browser is not", "= self.notebook.find_cell(execution_count) if cell is None: return # There may be no cell", "session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg))", "result['content']) # e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None}", "are multiple connections and we want only one notebook! 
# (for now) notebook", "- end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser and (status", "not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self", "tornado import ioloop from tornado import websocket import os import sys import json", "= notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def", "from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object):", "def editor(self, connection): if self._editor is not None: logging.info(\"WARNING: Only editor browser connection", "Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to push output to", "None} return elif session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata =", "a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over", "previously opened notebook\") session.notebook.reload(self) # If you hit reload in the browser, the", "Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If you hit reload", "one notebook! 
# (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list())", "result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg',", "[] self.notebook = None self.buffers = {} @property def browser(self): return self._browser @browser.setter", "= ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to", "= result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return", "str(e)) return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands", "relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta", "self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) > 0: # TODO:", "import httpserver from tornado import ioloop from tornado import websocket import os import", "queue pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status =", "= period def switch_notebook(self, notebook): self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__,", "return self._editor @editor.setter def editor(self, connection): if self._editor is not None: logging.info(\"WARNING: Only", "point position start_delta = relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start']", "'browser': session.browser = self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) >", "connection but to the whole server session. 
\"\"\" def __init__(self): self._browser = None", "self.notebook = notebook def start(self): self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop()", "= val except: return if status == 'completion': position = self.notebook.completion_info['position'] relative_position =", "handler. Tracks connection type.\" try: payload = json.loads(message) except Exception as e: logging.info('JSON", "payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return if", "html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server", "logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port,", "logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)]))", "nbformat from queue import Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook", "logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) #", "metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata':", "to push output to cells by polling from the queue pushed to by", "If you hit reload in the browser, the CSS needs to be re-sent", "python import logging import 
tornado import tornado.web from tornado import httpserver from tornado", "browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS", "any single websocket connection but to the whole server session. \"\"\" def __init__(self):", "and we want only one notebook! # (for now) notebook = ExecutableNotebook( (ThreadedExecutor,", "in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received message:", "the server that doesn't belong to any single websocket connection but to the", "Global state of the server that doesn't belong to any single websocket connection", "len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self)", "= notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. Tracks", "be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should happen even", "command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset()", "= json.loads(message) except Exception as e: logging.info('JSON parse exception: %s' % str(e)) return", "whole server session. 
\"\"\" def __init__(self): self._browser = None self._editor = None self.notebook", "if status == 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for", "# FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else:", "position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser and", "import tornado import tornado.web from tornado import httpserver from tornado import ioloop from", "that doesn't belong to any single websocket connection but to the whole server", "import sys import json import webbrowser import nbformat from queue import Queue from", "want only one notebook! # (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue),", "position start_delta = relative_position - result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] =", "self.callback.stop() def __call__(self): \"Processes queue pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait()", "on_message handler. Tracks connection type.\" try: payload = json.loads(message) except Exception as e:", "self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over the browser connection (currently", "polling from the queue pushed to by the ThreadedExecutor. \"\"\" def __init__(self, server,", "{}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status", "execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None:", "notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. 
Tracks connection", "import tornado.web from tornado import httpserver from tornado import ioloop from tornado import", "def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999,", "as e: logging.info('JSON parse exception: %s' % str(e)) return if 'cmd' in payload:", "(e.g mirroring) should happen even without a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd',", "payload) if (editor_msg is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def", "state of the server that doesn't belong to any single websocket connection but", "= (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED:", "self.server.queue.task_done() result, status = val except: return if status == 'completion': position =", "session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser and (status == 'comm_open'):", "= self logging.info('Added browser client connection') if session.notebook and len(session.notebook.cells) > 0: #", "# Note that there are multiple connections and we want only one notebook!", "(status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser,", "editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor is not", "'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result, status", "Only one browser connection expected\") self._browser = connection @property def editor(self): return self._editor", "val except: return if status == 'completion': position = 
self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position']", "Sets up a periodic callback to push output to cells by polling from", "'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status == 'comm_msg'):", "result, status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return #", "session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler", "None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self is", "payload.get('cmd', False) == 'reload_page': # Reload over the browser connection (currently assuming only", "= result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata,", "= self return if payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser", "\"\"\" def __init__(self, server, period=20): self.server = server self.notebook = None self.period =", "browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over the", "by polling from the queue pushed to by the ThreadedExecutor. 
\"\"\" def __init__(self,", "is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO)", "return self._browser @browser.setter def browser(self, connection): if self._browser is not None: logging.info(\"WARNING: Only", "notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook =", "0: # TODO: Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) # If", "tornado import tornado.web from tornado import httpserver from tornado import ioloop from tornado", "callback to push output to cells by polling from the queue pushed to", "cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message", "None: return # There may be no cell if running a silent execution", "the queue pushed to by the ThreadedExecutor. \"\"\" def __init__(self, server, period=20): self.server", "self.buffers = {} @property def browser(self): return self._browser @browser.setter def browser(self, connection): if", "cells by polling from the queue pushed to by the ThreadedExecutor. 
\"\"\" def", "client connection') if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart", "= os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global state of the", "None self._editor = None self.notebook = None self.buffers = {} def reset(self): self._browser", "you hit reload in the browser, the CSS needs to be re-sent session.notebook.update_theme(self,", "def __call__(self): \"Processes queue pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done()", "is None: # Create notebook # Note that there are multiple connections and", "*any* output return # What about silent execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode)", "connection expected\") self._browser = connection @property def editor(self): return self._editor @editor.setter def editor(self,", "exception: %s' % str(e)) return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']:", "= notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. Tracks connection type.\" try:", "= self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta = relative_position - result['cursor_start']", "status if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return # There", "logging.info(\"WARNING: Only editor browser connection expected\") self._editor = connection session = Session() class", "import ioloop from tornado import websocket import os import sys import json import", "notebook! 
# (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name]", "False) == 'editor': logging.info('Added editor client connection') session.editor = self return if payload.get('init',", "self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None)", "= None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)',", "browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser is not None: logging.info(\"WARNING:", "(currently assuming only one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg =", "connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to", "and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" % result)", "period=20): self.server = server self.notebook = None self.period = period def switch_notebook(self, notebook):", "= STATIC_PATH class Session(object): \"\"\" Global state of the server that doesn't belong", "result, status = val except: return if status == 'completion': position = self.notebook.completion_info['position']", "# Adjusted for emacs point position start_delta = relative_position - result['cursor_start'] end_delta =", "pushed to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val", "None: logging.info(\"WARNING: Only one browser connection expected\") self._browser = connection @property def editor(self):", "ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if", "'reset_server': self.output_callback.stop() session.reset() return 
if payload.get('init', False) == 'editor': logging.info('Added editor client connection')", "return if status == 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted", "is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return", "logging.info('JSON parse exception: %s' % str(e)) return if 'cmd' in payload: if payload['cmd']", "\"\"\" def __init__(self): self._browser = None self._editor = None self.notebook = None self.buffers", "browser client connection') if session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating", "def __init__(self): self._browser = None self._editor = None self.notebook = None self.buffers =", "status = val except: return if status == 'completion': position = self.notebook.completion_info['position'] relative_position", "cell = self.notebook.find_cell(execution_count) if cell is None: return # There may be no", "import nbformat from queue import Queue from .execute import ThreadedExecutor from .cells import", "(session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\")", "FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode,", "session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser:", "return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor is", "Adjusted for emacs point position start_delta = relative_position - result['cursor_start'] end_delta = relative_position", "to cells by polling from the queue pushed to by the ThreadedExecutor. 
\"\"\"", "> 0: # TODO: Needs updating logging.info(\"Restart with previously opened notebook\") session.notebook.reload(self) #", "# Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if", "if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s", "outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection", "if payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser client connection') if", "should happen even without a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) ==", "# Reload over the browser connection (currently assuming only one) if session.browser is", "{'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser and", "payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser client connection') if session.notebook", "None self.editors = [] self.notebook = None self.buffers = {} @property def browser(self):", "'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count = result, status if", "self._editor @editor.setter def editor(self, connection): if self._editor is not None: logging.info(\"WARNING: Only editor", "if self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import", "# {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser", "even without a browser tab open! 
self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': #", "None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor = connection session = Session()", "self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name): notebook", "needs to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should", "open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\") def toggle_notebook(self, name):", "session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler.", "browser connection (currently assuming only one) if session.browser is not None: session.notebook.reload(session.browser) return", "is not None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor = connection session", "FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: # {'data': {},", "\"\"\" Sets up a periodic callback to push output to cells by polling", "False) == 'browser': session.browser = self logging.info('Added browser client connection') if session.notebook and", "'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position", "ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed to by", "class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to push output to cells", "running a silent execution position = self.notebook.cell_position(cell) if execution_count is None: # For", "if 
session.notebook and len(session.notebook.cells) > 0: # TODO: Needs updating logging.info(\"Restart with previously", "def browser(self, connection): if self._browser is not None: logging.info(\"WARNING: Only one browser connection", "end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser and (status ==", "hit reload in the browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None)", "server, period=20): self.server = server self.notebook = None self.period = period def switch_notebook(self,", "% payload['cmd']) else: logging.info(u\"Received message: {0:<.100}\".format(message)) if payload.get('cmd') == 'reset_server': self.output_callback.stop() session.reset() return", "to any single websocket connection but to the whole server session. \"\"\" def", "from the queue pushed to by the ThreadedExecutor. \"\"\" def __init__(self, server, period=20):", "logging import tornado import tornado.web from tornado import httpserver from tornado import ioloop", "self return if payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser client", "if session.browser: cell = self.notebook.find_cell(execution_count) if cell is None: return # There may", "%s' % str(e)) return if 'cmd' in payload: if payload['cmd'] in ['start_mirror']: #", "if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\"", "no cell if running a silent execution position = self.notebook.cell_position(cell) if execution_count is", "ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global state of the server that doesn't", "'ZOO', 'target_module': None} return elif session.browser and (status == 'comm_msg'): buffers = result['buffers']", "['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else: logging.info(u\"Received message: 
{0:<.100}\".format(message))", "= None self._editor = None self.notebook = None self.buffers = {} def reset(self):", "import webbrowser import nbformat from queue import Queue from .execute import ThreadedExecutor from", "pushed to by the ThreadedExecutor. \"\"\" def __init__(self, server, period=20): self.server = server", "= connection @property def editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor", "None self.buffers = {} def reset(self): self._browser = None self.editors = [] self.notebook", "== 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point", "== 'reload_page': # Reload over the browser connection (currently assuming only one) if", "self._browser is not None: logging.info(\"WARNING: Only one browser connection expected\") self._browser = connection", "class WS(websocket.WebSocketHandler): def open(self): self.queue = Queue() self.output_callback = PeriodicOutputCallback(self) self.output_callback.start() logging.info(\"Connection opened\")", "e: logging.info('JSON parse exception: %s' % str(e)) return if 'cmd' in payload: if", ".cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\"", "ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global state", "(ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def", "- start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None", "self.notebook.completion_info['relative_position'] # Adjusted for 
emacs point position start_delta = relative_position - result['cursor_start'] end_delta", "if (editor_msg is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self,", "(editor_msg is not None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin):", "tornado.web.StaticFileHandler, {'path': STATIC_PATH}) tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED: Server started", "def serve(ws_port=9999, html_port=8000, host='0.0.0.0'): import logging logging.basicConfig(level=logging.INFO) html_handler = (r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH})", "@browser.setter def browser(self, connection): if self._browser is not None: logging.info(\"WARNING: Only one browser", "name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket", "if cell is None: return # There may be no cell if running", "self.notebook = None self.buffers = {} def reset(self): self._browser = None self.editors =", "elif session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {})", "import websocket import os import sys import json import webbrowser import nbformat from", "self.notebook.completion_info = None return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN", "# e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return", "over the browser connection (currently assuming only one) if session.browser is not None:", "'target_module': None} return elif session.browser and (status == 'comm_msg'): buffers = result['buffers'] metadata", "STATIC_PATH class Session(object): 
\"\"\" Global state of the server that doesn't belong to", "{} @property def browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser is", "OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content']) # e.g: #", "status == 'completion': position = self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs", "execution position = self.notebook.cell_position(cell) if execution_count is None: # For silent execution before", "type.\" try: payload = json.loads(message) except Exception as e: logging.info('JSON parse exception: %s'", "self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers)", "sys import json import webbrowser import nbformat from queue import Queue from .execute", "only one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload)", "- result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end']", "one) if session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if", "opened\") def toggle_notebook(self, name): notebook = session.buffers.get(name, None) if notebook is None: #", "val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if status ==", "if payload.get('cmd', False) == 'reload_page': # Reload over the browser connection (currently assuming", "= connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback", "# 'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status == 'comm_msg'): buffers", 
"self._editor is not None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor = connection", "# What about silent execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def", "buffers=buffers) return else: outnode, execution_count = result, status if session.browser: cell = self.notebook.find_cell(execution_count)", "of the server that doesn't belong to any single websocket connection but to", "if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\" % payload['cmd']) else:", "tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page': # Reload over the browser", "end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] = position", "mirroring) should happen even without a browser tab open! self.toggle_notebook(payload['name']) if payload.get('cmd', False)", "= [] self.notebook = None self.buffers = {} @property def browser(self): return self._browser", "None: # For silent execution before *any* output return # What about silent", "by the ThreadedExecutor. 
\"\"\" def __init__(self, server, period=20): self.server = server self.notebook =", "connection expected\") self._editor = connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up", "== 'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open',", "class Session(object): \"\"\" Global state of the server that doesn't belong to any", "{}) self.notebook.message(session.browser, 'comm_msg', # FIXME: redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']},", "and (status == 'comm_msg'): buffers = result['buffers'] metadata = result.get('metadata', {}) self.notebook.message(session.browser, 'comm_msg',", "# There may be no cell if running a silent execution position =", "from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH", "silent execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self): self.queue =", "self.queue), name=name, cells=list()) session.buffers[name] = notebook session.notebook = notebook self.output_callback.switch_notebook(notebook) def on_message(self, message):", "in the browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return #", "= None self.buffers = {} @property def browser(self): return self._browser @browser.setter def browser(self,", "@property def browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser is not", "the ThreadedExecutor. 
\"\"\" def __init__(self, server, period=20): self.server = server self.notebook = None", "'cmd' in payload: if payload['cmd'] in ['start_mirror']: # Verbose commands logging.info(u\"Received %s command\"", "client connection') session.editor = self return if payload.get('init', False) == 'browser': session.browser =", "to by ThreadedExecutor\" try: val = self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except:", "redundant 'comm_msg' {'msg_type': 'comm_msg', 'metadata': metadata, 'content': result['content']}, buffers=buffers) return else: outnode, execution_count", "self.output_callback.stop() session.reset() return if payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor", "self.notebook.completion_info['position'] relative_position = self.notebook.completion_info['relative_position'] # Adjusted for emacs point position start_delta = relative_position", "position - start_delta result['cursor_end'] = position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info =", "Queue from .execute import ThreadedExecutor from .cells import ExecutableNotebook STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client')", "self.editors = [] self.notebook = None self.buffers = {} @property def browser(self): return", "self.notebook = None self.period = period def switch_notebook(self, notebook): self.notebook = notebook def", "= position - end_delta session.editor.write_message(json.dumps({'cmd':'completion', 'data': result})) self.notebook.completion_info = None return if session.browser", "'comm_open'): logging.info(\"REQUEST TO OPEN COMM FOR JS: %s\" % result) self.notebook.message(session.browser, 'comm_open', result['content'])", "expected\") self._editor = connection session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up a", "= self.notebook.cell_position(cell) if execution_count is None: # For silent 
execution before *any* output", "on_close(self): logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000,", "= self.server.queue.get_nowait() self.server.queue.task_done() result, status = val except: return if status == 'completion':", "tornado.web.Application([html_handler]).listen(html_port) ws_server = httpserver.HTTPServer(tornado.web.Application([(r\"/\", WS)])) ws_server.listen(ws_port, host) logging.info(\"STARTED: Server started and listening\") ioloop.IOLoop.instance().start()", "to be re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should happen", "if session.browser is not None: session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg", "from tornado import websocket import os import sys import json import webbrowser import", "= None self.buffers = {} def reset(self): self._browser = None self.editors = []", "browser(self, connection): if self._browser is not None: logging.info(\"WARNING: Only one browser connection expected\")", "= None self.editors = [] self.notebook = None self.buffers = {} @property def", "re-sent session.notebook.update_theme(self, css=None) return # SOME COMMANDS (e.g mirroring) should happen even without", "self._editor = None self.notebook = None self.buffers = {} def reset(self): self._browser =", "= None return if session.browser and (status == 'comm_open'): logging.info(\"REQUEST TO OPEN COMM", "= {} @property def browser(self): return self._browser @browser.setter def browser(self, connection): if self._browser", "self.callback = ioloop.PeriodicCallback(self.__call__, self.period) self.callback.start() def stop(self): self.callback.stop() def __call__(self): \"Processes queue pushed", "# (for now) notebook = ExecutableNotebook( (ThreadedExecutor, \"threaded-kernel\", self.queue), name=name, cells=list()) 
session.buffers[name] =", "on_message(self, message): \"Websocket on_message handler. Tracks connection type.\" try: payload = json.loads(message) except", "return # There may be no cell if running a silent execution position", "os import sys import json import webbrowser import nbformat from queue import Queue", "What about silent execution after start? self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler): def open(self):", "logging.info('Added editor client connection') session.editor = self return if payload.get('init', False) == 'browser':", "self.notebook = None self.buffers = {} @property def browser(self): return self._browser @browser.setter def", "STATIC_PATH = os.path.join(os.path.split(__file__)[0], 'client') ExecutableNotebook.STATIC_PATH = STATIC_PATH class Session(object): \"\"\" Global state of", "session = Session() class PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to push", "Create notebook # Note that there are multiple connections and we want only", "'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status == 'comm_msg'): buffers =", "return # What about silent execution after start? 
self.notebook.update_cell_outputs(session.browser, position, outnode) class WS(websocket.WebSocketHandler):", "Only editor browser connection expected\") self._editor = connection session = Session() class PeriodicOutputCallback(object):", "for emacs point position start_delta = relative_position - result['cursor_start'] end_delta = relative_position -", "from tornado import ioloop from tornado import websocket import os import sys import", "@property def editor(self): return self._editor @editor.setter def editor(self, connection): if self._editor is not", "logging.info(\"ON_CLOSE\") if self is session.browser: session.browser = None self.output_callback.stop() def serve(ws_port=9999, html_port=8000, host='0.0.0.0'):", "session.notebook.reload(session.browser) return editor_msg = session.notebook.dispatch(session.browser, payload) if (editor_msg is not None) and (session.editor", "if self._editor is not None: logging.info(\"WARNING: Only editor browser connection expected\") self._editor =", "PeriodicOutputCallback(object): \"\"\" Sets up a periodic callback to push output to cells by", "happen even without a browser tab open! 
self.toggle_notebook(payload['name']) if payload.get('cmd', False) == 'reload_page':", "doesn't belong to any single websocket connection but to the whole server session.", "result['cursor_start'] end_delta = relative_position - result['cursor_end'] result['cursor_start'] = position - start_delta result['cursor_end'] =", "e.g: # {'data': {}, 'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif", "'comm_id': 'ee0a39d3728945cdb4ad30848b7856fc', # 'target_name': 'ZOO', 'target_module': None} return elif session.browser and (status ==", "'reload_page': # Reload over the browser connection (currently assuming only one) if session.browser", "None) and (session.editor is not None): session.editor.write_message(json.dumps(editor_msg)) def check_origin(self, origin): return True def", "a silent execution position = self.notebook.cell_position(cell) if execution_count is None: # For silent", "belong to any single websocket connection but to the whole server session. \"\"\"", "\"\"\" Global state of the server that doesn't belong to any single websocket", "queue pushed to by the ThreadedExecutor. \"\"\" def __init__(self, server, period=20): self.server =", "session.reset() return if payload.get('init', False) == 'editor': logging.info('Added editor client connection') session.editor =", "before *any* output return # What about silent execution after start? self.notebook.update_cell_outputs(session.browser, position,", "\"Websocket on_message handler. Tracks connection type.\" try: payload = json.loads(message) except Exception as", "notebook self.output_callback.switch_notebook(notebook) def on_message(self, message): \"Websocket on_message handler. 
Tracks connection type.\" try: payload", "return if payload.get('init', False) == 'browser': session.browser = self logging.info('Added browser client connection')", "reload in the browser, the CSS needs to be re-sent session.notebook.update_theme(self, css=None) return", "None self.buffers = {} @property def browser(self): return self._browser @browser.setter def browser(self, connection):", "one browser connection expected\") self._browser = connection @property def editor(self): return self._editor @editor.setter" ]
[ "from correios.entities import Objeto from correios.main import Correios __version__ = \"0.1.4\" __all__ =", "<reponame>rennancockles/rastreio-correios from correios.entities import Objeto from correios.main import Correios __version__ = \"0.1.4\" __all__", "import Objeto from correios.main import Correios __version__ = \"0.1.4\" __all__ = [\"Objeto\", \"Correios\"]", "correios.entities import Objeto from correios.main import Correios __version__ = \"0.1.4\" __all__ = [\"Objeto\"," ]
[ "None count_loss = 0 for i in range(target_count): target_bbox = target_bboxes[:, i, :]", "cur_loss = None select_k = None for k in k_keys: pred_bbox = pred_bboxes[:,", "pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys =", "self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count =", "+= cur_loss for k in k_keys: res_loss += 1 count_loss += 1 return", "= IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1]", "tloss = self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss = tloss select_k =", "res_loss = None count_loss = 0 for i in range(target_count): target_bbox = target_bboxes[:,", "= None for k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape,", "self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys", "cur_loss is not None: k_keys.remove(select_k) count_loss += 1 if res_loss is None: res_loss", "BboxLoss(torch.nn.Module): def __init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors:", "= target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k = None", "= pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss", "target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None count_loss = 0 for", "select_k = None for k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape,", "count_loss += 1 if res_loss is None: res_loss = cur_loss else: res_loss +=", "corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is 
None: cur_loss = tloss select_k", "for i in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] !=", "k else: if tloss < cur_loss and tloss >= 0: cur_loss = tloss", "!= 0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k = None for k", "k_keys.remove(select_k) count_loss += 1 if res_loss is None: res_loss = cur_loss else: res_loss", "import torch class BboxLoss(torch.nn.Module): def __init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def", "= k if cur_loss is not None: k_keys.remove(select_k) count_loss += 1 if res_loss", "1 if res_loss is None: res_loss = cur_loss else: res_loss += cur_loss for", "if res_loss is None: res_loss = cur_loss else: res_loss += cur_loss for k", "target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask]", "None for k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape)", "in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox,", "k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss =", "self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss = tloss select_k = k else:", "for k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss", ":][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss", "select_k = k else: if tloss < cur_loss and tloss >= 0: cur_loss", "= target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None count_loss = 0 for i", "= target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = 
target_bbox[corrects_mask] cur_loss", "tloss >= 0: cur_loss = tloss select_k = k if cur_loss is not", "k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is None:", ":] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k", "if tloss < cur_loss and tloss >= 0: cur_loss = tloss select_k =", "if cur_loss is None: cur_loss = tloss select_k = k else: if tloss", "tloss < cur_loss and tloss >= 0: cur_loss = tloss select_k = k", "= None count_loss = 0 for i in range(target_count): target_bbox = target_bboxes[:, i,", "cur_loss is None: cur_loss = tloss select_k = k else: if tloss <", "i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss = None", "not None: k_keys.remove(select_k) count_loss += 1 if res_loss is None: res_loss = cur_loss", "tloss select_k = k if cur_loss is not None: k_keys.remove(select_k) count_loss += 1", "< cur_loss and tloss >= 0: cur_loss = tloss select_k = k if", "i in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0", "target_bbox = target_bbox[corrects_mask] cur_loss = None select_k = None for k in k_keys:", "pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss = tloss", "class BboxLoss(torch.nn.Module): def __init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self,", "#print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss =", "forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1]", "__init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, 
target_bboxes:", "self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count", "if cur_loss is not None: k_keys.remove(select_k) count_loss += 1 if res_loss is None:", "target_bbox[corrects_mask] cur_loss = None select_k = None for k in k_keys: pred_bbox =", "target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None count_loss = 0 for i in", "= tloss select_k = k else: if tloss < cur_loss and tloss >=", "is None: res_loss = cur_loss else: res_loss += cur_loss for k in k_keys:", "k_keys = list(range(pred_count)) res_loss = None count_loss = 0 for i in range(target_count):", "): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None", "0 for i in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0]", "= list(range(pred_count)) res_loss = None count_loss = 0 for i in range(target_count): target_bbox", "= target_bbox[corrects_mask] cur_loss = None select_k = None for k in k_keys: pred_bbox", "cur_loss = tloss select_k = k if cur_loss is not None: k_keys.remove(select_k) count_loss", "res_loss = cur_loss else: res_loss += cur_loss for k in k_keys: res_loss +=", "else: res_loss += cur_loss for k in k_keys: res_loss += 1 count_loss +=", "else: if tloss < cur_loss and tloss >= 0: cur_loss = tloss select_k", "corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k =", "cur_loss = tloss select_k = k else: if tloss < cur_loss and tloss", "list(range(pred_count)) res_loss = None count_loss = 0 for i in range(target_count): target_bbox =", "k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox)", "pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = 
None count_loss", "= tloss select_k = k if cur_loss is not None: k_keys.remove(select_k) count_loss +=", "in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox", "def __init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor,", "and tloss >= 0: cur_loss = tloss select_k = k if cur_loss is", "+= 1 if res_loss is None: res_loss = cur_loss else: res_loss += cur_loss", "pred_bbox = pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if", "= 0 for i in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask =", "torch class BboxLoss(torch.nn.Module): def __init__(self, device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward(", "0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k = None for k in", "pred_bboxes[:, k, :][corrects_mask] #print(target_bbox.shape, pred_bbox.shape, corrects_mask.shape) tloss = self.loss(pred_bbox, target_bbox) if cur_loss is", "= pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None count_loss =", "cur_loss else: res_loss += cur_loss for k in k_keys: res_loss += 1 count_loss", "= self.loss(pred_bbox, target_bbox) if cur_loss is None: cur_loss = tloss select_k = k", "torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss =", "is not None: k_keys.remove(select_k) count_loss += 1 if res_loss is None: res_loss =", "None select_k = None for k in k_keys: pred_bbox = pred_bboxes[:, k, :][corrects_mask]", "None: k_keys.remove(select_k) count_loss += 1 if res_loss is None: res_loss = cur_loss else:", "pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss = None count_loss = 0", "is None: 
cur_loss = tloss select_k = k else: if tloss < cur_loss", "0: cur_loss = tloss select_k = k if cur_loss is not None: k_keys.remove(select_k)", "res_loss is None: res_loss = cur_loss else: res_loss += cur_loss for k in", "range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox =", "select_k = k if cur_loss is not None: k_keys.remove(select_k) count_loss += 1 if", "cur_loss for k in k_keys: res_loss += 1 count_loss += 1 return res_loss/count_loss", "= None select_k = None for k in k_keys: pred_bbox = pred_bboxes[:, k,", "cur_loss and tloss >= 0: cur_loss = tloss select_k = k if cur_loss", "None: res_loss = cur_loss else: res_loss += cur_loss for k in k_keys: res_loss", "target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss = None select_k = None for", "tloss select_k = k else: if tloss < cur_loss and tloss >= 0:", "res_loss += cur_loss for k in k_keys: res_loss += 1 count_loss += 1", "None: cur_loss = tloss select_k = k else: if tloss < cur_loss and", "k if cur_loss is not None: k_keys.remove(select_k) count_loss += 1 if res_loss is", "super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ):", "device): super(BboxLoss, self).__init__() self.loss = IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor,", "= k else: if tloss < cur_loss and tloss >= 0: cur_loss =", "target_bboxes[:, i, :] corrects_mask = target_bbox.max(dim=1)[0] != 0 target_bbox = target_bbox[corrects_mask] cur_loss =", "count_loss = 0 for i in range(target_count): target_bbox = target_bboxes[:, i, :] corrects_mask", "target_bbox) if cur_loss is None: cur_loss = tloss select_k = k else: if", "torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count))", "= cur_loss else: res_loss += cur_loss for k in 
k_keys: res_loss += 1", "def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count =", "target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count = target_bboxes.shape[1] k_keys = list(range(pred_count)) res_loss", "IouDotsLoss(device) def forward( self, pred_vectors: torch.Tensor, target_bboxes: torch.Tensor, ): pred_count = pred_bboxes.shape[1] target_count", ">= 0: cur_loss = tloss select_k = k if cur_loss is not None:" ]
[ "# Generated by Django 4.0.1 on 2022-01-13 17:53 from django.db import migrations, models", "on 2022-01-13 17:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "17:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations =", "migrations, models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations = [", "Django 4.0.1 on 2022-01-13 17:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ]", "2022-01-13 17:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('points',", "by Django 4.0.1 on 2022-01-13 17:53 from django.db import migrations, models class Migration(migrations.Migration):", "Generated by Django 4.0.1 on 2022-01-13 17:53 from django.db import migrations, models class", "class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations = [ migrations.AlterField( model_name='spend',", "Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations = [ migrations.AlterField( model_name='spend', name='receipt',", "4.0.1 on 2022-01-13 17:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "<gh_stars>0 # Generated by Django 4.0.1 on 2022-01-13 17:53 from django.db import migrations,", "dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations = [ migrations.AlterField( model_name='spend', name='receipt', field=models.JSONField(),", "[ ('points', 
'0006_alter_spend_receipt'), ] operations = [ migrations.AlterField( model_name='spend', name='receipt', field=models.JSONField(), ), ]", "= [ ('points', '0006_alter_spend_receipt'), ] operations = [ migrations.AlterField( model_name='spend', name='receipt', field=models.JSONField(), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations", "models class Migration(migrations.Migration): dependencies = [ ('points', '0006_alter_spend_receipt'), ] operations = [ migrations.AlterField(" ]
[ "log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir)", "DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net", "= net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds", "+= torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += images.size(0)", "nn from torchvision.datasets import CIFAR10 from torch.optim import Adam from torchvision.models import resnet50", "outputs = net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标)", "start_time = time.time() best_acc = 0.0 num_step = 0 # 开始周期训练 for epoch", "更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度", "model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes", "= root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir)", "time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60))", "std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir,", "/ num_samples train_loss = train_loss / num_batch return train_acc, train_loss, num_step # 验证子函数", "data images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs", "os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: 
os.mkdir(model_dir) if os.path.exists(log_dir) is False:", "计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc,", "is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir", "batch_size = 32 learning_rate = 0.1 lr_step_size = 30 weight_decay = 1e-4 momentum", "= DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层", "train_loss += loss num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step()", "'/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project = '' # root_data_save", "检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs =", "= net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device)", "enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device) labels =", "num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in", "'/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project", "#transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),", "+ 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False:", "0 # 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time 
= time.time() print('Epoch", "= criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss", "# 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 30", "valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc", "num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0", "format(epoch, epochs, epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'.", "loss num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() #", "# 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if", "torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size =", "0.456, 0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess =", "% 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果", "valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples +=", "fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上", "os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/'", "train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset,", "0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4,", "os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "# 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step = 0 # 开始周期训练", "= 0 num_samples = 0 # 进行网络的训练 for index, data in enumerate(train_loader, start=0):", "= 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader:", "complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val Acc: {:4f}'.format(best_acc))", "import SummaryWriter import torch.nn.functional as F import os import time # ---------------------配置阶段------------------------------ #", "= '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' #", "= Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # 
------------------定义训练、验证子函数--------------------", "train_acc = train_acc / num_samples train_loss = train_loss / num_batch return train_acc, train_loss,", "valid_acc:{:.4f}'. format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss':", "labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1) loss", "for images, labels in valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) #", "0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255]", "as F import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/'", "# self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset =", "transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size,", "= DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------", "import resnet50 from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import", "valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m", "torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional", "不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上 images, 
labels = images.to(device), labels.to(device)", "batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net =", "# 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上", "labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples += images.size(0)", "time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save =", "# 设置超参数 epochs = 200 batch_size = 32 learning_rate = 0.1 lr_step_size =", "'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir)", "= SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training stage....') #", "outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs,", "images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值", "net = resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc", "# 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss", "optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) #", "DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F", "model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save + 
'log/' # 若文件夹不存在,则创建 if", "数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) #", "in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device) labels", "transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess =", "------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad()", "best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time", "torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上 images, labels =", "in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val Acc: {:4f}'.format(best_acc)) #", "import torch.nn.functional as F import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset", "lr_step_size = 30 weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值 mean =", "transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import os import time", "labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch +=", "train_acc, train_loss, num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) #", "root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/'", "as nn from torchvision.datasets 
import CIFAR10 from torch.optim import Adam from torchvision.models import", "# 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch return", "import Adam from torchvision.models import resnet50 from torch.utils.data import DataLoader import torchvision.transforms as", "learning_rate = 0.1 lr_step_size = 30 weight_decay = 1e-4 momentum = 0.9 #", "acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch +=", "# 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率", "# 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc", "train_acc += acc train_loss += loss num_batch += 1 num_samples += images.size(0) #", "stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0", "os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/'", "tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc", "= 32 learning_rate = 0.1 lr_step_size = 30 weight_decay = 1e-4 momentum =", "记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数", "acc train_loss += loss num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward()", "train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), 
transforms.Normalize(mean=mean, std=std)])", "统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples", "# -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4,", "F import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project", "resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features,", "# 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred =", "'/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/'", "= torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred ==", "criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer", "print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size = 32 learning_rate = 0.1 lr_step_size", "'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc", "更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss =", "valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{},", "dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = 
torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例", "== labels)).item() valid_loss += loss num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值", "log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if", "print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch", "输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 #", "transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset =", "images, labels in valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围", "= '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project", "= time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20) # 训练 train_acc,", "+= 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss", "= 0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406] std = [0.229, 0.224,", "valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad():", "contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集", "criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch", "batch_acc:{:.4f}\\n'. 
format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) #", "dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc", "if (index + 1) % 30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f},", "# writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)},", "root_data_save + 'checkpoints/' log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is", "num_batch return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...') #", "root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is", "batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层 fc_in_features =", "return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0", "train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval()", "print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion =", "in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-'", "30 weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值 mean = [0.485, 0.456,", "in valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs =", "transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, 
transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,", "optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 30 == 0: #", "os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数", "valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True,", "shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features", "print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time // 60, epoch_time % 60)) print('", "F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) #", "end_time = time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time", "if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else", "网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion", "'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device =", "time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20) # 训练 train_acc, train_loss,", "= valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time =", "# 训练子函数 def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() #", "= _train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) # 
输出每周期的训练、验证的平均损失值、准确率 epoch_time =", "global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(),", "# ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def", "0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 # 进行网络的训练 for", "= 0 # 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time()", "for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs -", "= '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project = '' #", "transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean,", "os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir =", "epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. format(train_loss, train_acc, valid_loss, valid_acc)) #", "= 1e-4 momentum = 0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406] std", "train_loss = train_loss / num_batch return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader):", "获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training", "60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss, train_acc, valid_loss, valid_acc))", "from torch.optim import Adam from torchvision.models import resnet50 from torch.utils.data import DataLoader import", "epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. format(train_loss, train_acc,", "torch.nn as nn from torchvision.datasets import CIFAR10 from torch.optim import Adam from torchvision.models", "# 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds", "import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as", "hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) #", "# 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. 
format(epoch, epochs,", "'' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir", "1e-4 momentum = 0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406] std =", "time.time() best_acc = 0.0 num_step = 0 # 开始周期训练 for epoch in range(epochs):", "num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss", "= 0 # 进行网络的训练 for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images,", "{:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # 关闭writer", "{}/{}'.format(epoch, epochs - 1)) print('-' * 20) # 训练 train_acc, train_loss, num_step =", "获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4)", "momentum = 0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406] std = [0.229,", "torch import torch.nn as nn from torchvision.datasets import CIFAR10 from torch.optim import Adam", "(index + 1) % 30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'.", "test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) #", "选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print('", "update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete in", "print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay)", "# 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch,", "std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, 
transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess)", "= '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) #", "若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir)", "定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0", "torch.optim import Adam from torchvision.models import resnet50 from torch.utils.data import DataLoader import torchvision.transforms", "def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss =", "time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time // 60, epoch_time", "= 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save +", "# 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save + 'log/' #", "model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time", "输出一定次数的损失和精度情况 if (index + 1) % 30 == 0: # 输出损失值和精度值 print(' batch:{},", "train_loss, num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率", "- epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. 
format(epoch, epochs, epoch_time // 60, epoch_time %", "# 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time //", "format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签", "if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{},", "root_project = '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save +", "获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器", "0 num_samples = 0 # 进行网络的训练 for index, data in enumerate(train_loader, start=0): #", "= time.time() best_acc = 0.0 num_step = 0 # 开始周期训练 for epoch in", "# 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch", "transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset", "# 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project = '' # root_data_save =", "+= 1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss /", "# 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir)", "验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss", "num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数", "nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 
定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer =", "开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs", "/ num_batch return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...')", "= net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss", "= '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset =", "'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数", "设置超参数 epochs = 200 batch_size = 32 learning_rate = 0.1 lr_step_size = 30", "'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size = 32 learning_rate = 0.1", "valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch)", "= transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess)", "log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况", "valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step =", "labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc =", "train_loss = 0.0 num_batch = 0 num_samples = 0 # 进行网络的训练 for index,", "images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1)", "labels.to(device) # 
推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred", "import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import", "train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) #", "0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224,", "loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 #", "单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step +=", "= CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader", "print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss':", "训练 train_acc, train_loss, num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader)", "---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' #", "transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False,", "= _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'.", "# 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs", "def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc", "+= images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1)", "mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- #", "False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) #", "nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir)", "torchvision.datasets import CIFAR10 from torch.optim import Adam from torchvision.models import resnet50 from torch.utils.data", "transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 
train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir,", "epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1))", "= 30 weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值 mean = [0.485,", "= criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量", "images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch", "valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练--------------------------------", "optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 30 == 0: # 输出损失值和精度值", "+ 'checkpoints/' log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False:", "= 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 # 进行网络的训练", "else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size = 32 learning_rate =", "# 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) #", "train_acc / num_samples train_loss = train_loss / num_batch return train_acc, train_loss, num_step #", "loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)',", "loss num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc /", "# self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project =", "1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch", "_valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0", "num_workers=4) # 
------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features #", "0.0 num_batch = 0 num_samples = 0 # 进行网络的训练 for index, data in", "start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device) labels = labels.to(device)", "torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += images.size(0) #", "epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20) # 训练", "= torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1", "设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20) #", "CIFAR10 from torch.optim import Adam from torchvision.models import resnet50 from torch.utils.data import DataLoader", "out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(),", "----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader,", "= time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time // 60,", "== 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. 
format(index, loss, acc / images.size(0)))", "range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' *", "反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 30 ==", "# ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step = 0", "= data images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images)", "# 不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device),", "# 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(),", "_train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc =", "torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples", "+ 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir +", "-----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4),", "net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) #", "DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 
重写网络的最后一层 fc_in_features", "test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)", "self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project = ''", "train_loss / num_batch return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid", "# 训练 train_acc, train_loss, num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss =", "num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples", "30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc /", "torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item()", "定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step = 0 # 开始周期训练 for", "F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) #", "epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time // 60, epoch_time % 60))", "= F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels)", "'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1'", "transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset", "outputs = net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1)", "// 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss, train_acc, valid_loss,", "# self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' #", "cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir", "net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....')", "images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs =", "1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index", "labels in valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs", "= transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess", "print('-' * 20) # 训练 train_acc, train_loss, num_step = _train(train_loader, num_step) # 验证", "scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) #", "= train_loss / num_batch return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print('", "with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上 images, labels", "dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失", "root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 
数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' #", "# 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) #", "global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples", "valid_loss += loss num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc =", "import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project =", "train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc", "valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss", "将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------", "20) # 训练 train_acc, train_loss, num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss", "num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index +", "if os.path.exists(root_data_save) is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is", "'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc:", "训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60,", "# root_project = '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save", "进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader: # 将测试数据放入GPU上 images,", "False: 
os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) #", "images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy':", "weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值 mean = [0.485, 0.456, 0.406]", "记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc /", "将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch =", "root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save", "os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1'", "num_step = _train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time", "定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器", "epochs = 200 batch_size = 32 learning_rate = 0.1 lr_step_size = 30 weight_decay", "best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print()", "'/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save", "+ 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10 if", "200 batch_size = 32 learning_rate = 0.1 lr_step_size = 30 weight_decay = 1e-4", "= CIFAR10(root=cifar_10_dir, 
train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader =", "- start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best", "index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images =", "preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item()", "num_batch = 0 num_samples = 0 # 进行网络的训练 for index, data in enumerate(train_loader,", "valid_loader: # 将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images)", "'.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False:", "epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f},", "train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader", "[0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(),", "CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset,", "计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss +=", "模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建", "import torch import torch.nn as nn from torchvision.datasets import CIFAR10 from torch.optim 
import", "writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training stage....')", "valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time()", "valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time()", "224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) # 获取训练集、测试集 train_dataset = CIFAR10(root=cifar_10_dir, train=True, transform=train_data_preprocess) test_dataset =", "推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs,", "# root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir =", "/ images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss,", "/ images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc", "train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 #", "saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])", "epochs, epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss,", "数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/'", "torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import os", "统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch", "数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/' # root_project = '' # root_data_save = 'data_save/'", "Adam from torchvision.models import resnet50 from torch.utils.data import DataLoader import torchvision.transforms as transforms", "= '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/'", "images, labels = data images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs", "= 0.1 lr_step_size = 30 weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值", "_train(train_loader, num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time()", "# 进行网络的训练 for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels =", "best_acc = 0.0 num_step = 0 # 开始周期训练 for epoch in range(epochs): #", "print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20) # 训练 train_acc, train_loss, num_step", "for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images", "获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值", "推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels) #", "# ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = 
'/home/team/xiaonan/data_save/'", "epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time //", "from torchvision.datasets import CIFAR10 from torch.optim import Adam from torchvision.models import resnet50 from", "# 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs", "SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量", "= images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1) #", "0.1 lr_step_size = 30 weight_decay = 1e-4 momentum = 0.9 # 均值和标准差值 mean", "------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features)", "num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net)", "训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean,", "net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss()", "计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds ==", "+= loss num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad()", "* 20) # 训练 train_acc, train_loss, num_step = _train(train_loader, 
num_step) # 验证 valid_acc,", "net(images) outputs = F.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss =", "batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index)", "# ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train()", "if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size = 32", "shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) # ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50()", "data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device)", "torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() -", "% 30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. 
format(index, loss, acc", "= 'Dataset/' # root_project = '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir", "= images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs,", "# 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) #", "valid_loss = valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time", "+ '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is", "writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1", "= 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if", "10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available()", "# 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss", "num_samples train_loss = train_loss / num_batch return train_acc, train_loss, num_step # 验证子函数 def", "valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc", "SummaryWriter import torch.nn.functional as F import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定)", "as transforms from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import os import", "torchvision.models import resnet50 from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard", "root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + 
'.pth' log_dir = log_dir", "# 输出一定次数的损失和精度情况 if (index + 1) % 30 == 0: # 输出损失值和精度值 print('", "valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch =", "224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224,", "num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc =", "{:.0f}s'. format(epoch, epochs, epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f},", "batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss,", "dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss", "= resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc =", "计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch return train_acc,", "32 learning_rate = 0.1 lr_step_size = 30 weight_decay = 1e-4 momentum = 0.9", "train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: #", "+= acc train_loss += loss num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量", "= root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir =", "= [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224),", "/ num_samples 
valid_loss = valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- #", "weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数", "# 验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time", "labels = data images = images.to(device) labels = labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs =", "= F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1)", "# 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') #", "# 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save =", "acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc =", "print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val Acc:", "training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss =", "/ num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc", "loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值", "False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir +", "# 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', 
tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc", "0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用", "60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)',", "os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir =", "self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset = 'Dataset/'", "均值和标准差值 mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集--------------------------------", "# 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True,", "0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels", "train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc >", "print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes)", "writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step)", "import CIFAR10 from torch.optim import Adam from torchvision.models import resnet50 from torch.utils.data import", "valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step", "= train_acc / num_samples train_loss = train_loss / num_batch return train_acc, train_loss, 
num_step", "print(' epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training", "+= 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if", "# 设定每周期开始时间点、周期信息 epoch_start_time = time.time() print('Epoch {}/{}'.format(epoch, epochs - 1)) print('-' * 20)", "0 # 进行网络的训练 for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels", "= valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss #", "root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset", "num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for", "= time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time %", "print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step):", "return train_acc, train_loss, num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量;", "'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc =", "= labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1) loss =", "将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples =", "is False: os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir)", "os.mkdir(root_data_save) if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量", "print(' batch:{}, 
batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss, acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss',", "验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print('", "+= images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss /", "num_step # 验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc =", "# 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch)) print() #", "from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import", "= log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) #", "= model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes =", "= 0.0 num_step = 0 # 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息", "outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs,", "self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器)", "tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 #", "model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete in {:.0f}m", "将测试数据放入GPU上 images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs =", "# 均值和标准差值 mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255] #", "[0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess", 
"net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0", "labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs,", "# cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' +", "epochs - 1)) print('-' * 20) # 训练 train_acc, train_loss, num_step = _train(train_loader,", "== labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples +=", "# 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = '/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机)", "train_loss:{:.4f}, train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss,", "输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch, epochs, epoch_time", "0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. 
format(index, loss, acc / images.size(0))) #", "0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), #transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4,", "images, labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs,", "= torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size", "# 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data images = images.to(device) labels = labels.to(device) #", "'checkpoints/' log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save) is False: os.mkdir(root_data_save)", "format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss,", "cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir = model_dir + 'cifar10_resnet50_v1' + '.pth'", "loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss +=", "# 验证子函数 def _valid(valid_loader): print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0", "+= loss num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc", "train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc':", "# ------------------------构建网络、定义损失函数和优化器------------------------ net = resnet50() print(net) # 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道", "stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch = 0", 
"valid_loss:{:.4f}, valid_acc:{:.4f}'. format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc,", "from torchvision.models import resnet50 from torch.utils.data import DataLoader import torchvision.transforms as transforms from", "'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir)", "valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update", "'/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' # self.root_data_save = '/home/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(服务器) # root_dataset", "# 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- # 训练子函数 def _train(train_loader, num_step): print('", "num_step) # 验证 valid_acc, valid_loss = _valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() -", "is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset", "= [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理", "1)) print('-' * 20) # 训练 train_acc, train_loss, num_step = _train(train_loader, num_step) #", "train=True, transform=train_data_preprocess) test_dataset = CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,", "= 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试", "print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss", "CIFAR10(root=cifar_10_dir, train=False, transform=valid_data_preprocess) # 获取训练集和测试集的加载器 train_loader = DataLoader(dataset=train_dataset, 
batch_size=batch_size, shuffle=True, num_workers=4) test_loader =", "+ 'cifar10_resnet50_v1' num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device", "= 0.0 num_batch = 0 num_samples = 0 # 进行网络的训练 for index, data", "'Dataset/' # root_project = '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录 model_dir =", "images.size(0)}, global_step=num_step) # 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc /", "= nn.CrossEntropyLoss() optimizer = Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer =", "# 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1", "start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val", "# 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤 num_step", "torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200 batch_size = 32 learning_rate", "= 200 batch_size = 32 learning_rate = 0.1 lr_step_size = 30 weight_decay =", "model_dir + 'cifar10_resnet50_v1' + '.pth' log_dir = log_dir + 'cifar10_resnet50_v1' num_classes = 10", "net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds =", "transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4, contrast=0.4), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)]) valid_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(),", "'data_save/' # 模型保存目录、日志文件保存目录 model_dir = root_data_save + 'checkpoints/' log_dir = root_data_save + 'log/'", "# 更新全局步骤 num_step += 1 # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss", "time:{:.0f}m {:.0f}s'. 
format(epoch, epochs, epoch_time // 60, epoch_time % 60)) print(' train_loss:{:.4f}, train_acc:{:.4f}\\n", "import torch.nn as nn from torchvision.datasets import CIFAR10 from torch.optim import Adam from", "print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete in {:.0f}m {:.0f}s'.format(end_time", "num_batch += 1 num_samples += images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况", "is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device))", "root_dataset = 'Dataset/' # root_project = '' # root_data_save = 'data_save/' # 模型保存目录、日志文件保存目录", "if os.path.exists(model_dir) is False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir", "0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with", "epoch:{}, update model...'.format(epoch)) print() # 训练结束时间、输出最好的精度 end_time = time.time() - start_time print('Training complete", "torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import os import time # ---------------------配置阶段------------------------------", "= 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): #", "writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc}, global_step=epoch) # 选出最好的模型参数 if", "{:.0f}s'.format(end_time // 60, end_time % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # 关闭writer writer.close()", "0.0 num_step = 0 # 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time", "> best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), model_dir) print(' epoch:{}, update model...'.format(epoch))", "# 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return", "= 
nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器 criterion = nn.CrossEntropyLoss() optimizer", "1) % 30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index, loss,", "resnet50 from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter", "False: os.mkdir(model_dir) if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset +", "torch.nn.functional as F import os import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset =", "1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss =", "0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images, labels in valid_loader: #", "----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time = time.time() best_acc = 0.0 num_step = 0 #", "# 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples =", "= '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project = '/home/xiaonan/experients/' #", "重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) #", "acc / images.size(0))) # 记录训练批次的损失和准确率 # writer.add_scalar('Train/Loss', scalar_value=loss, global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss':", "net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch = 0", "import time # ---------------------配置阶段------------------------------ # 数据集根目录、项目根目录、训练数据保存目录(实验室,根据个人情况设定) root_dataset = '/home/team/xiaonan/Dataset/' root_project = 
'/home/team/xiaonan/experients/' root_data_save", "计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc +=", "images.size(0) # 反向传播(计算梯度);梯度下降优化(更新参数);重置梯度张量 loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) %", "labels)).item() valid_loss += loss num_batch += 1 num_samples += images.size(0) # 计算测试精度和损失值 valid_acc", "# 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(images) outputs = F.softmax(outputs, dim=1) loss = criterion(outputs, labels)", "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print('device:{}'.format(device)) # 设置超参数 epochs = 200", "optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples", "labels = images.to(device), labels.to(device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(images) outputs = F.softmax(outputs, dim=1)", "_valid(test_loader) # 输出每周期的训练、验证的平均损失值、准确率 epoch_time = time.time() - epoch_start_time print(' epoch:{}/{}, time:{:.0f}m {:.0f}s'. format(epoch,", "# 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net) # 将网络放置到GPU上 net.to(device) # 定义损失函数和优化器", "= valid_loss / num_batch return valid_acc, valid_loss # ----------------------------开始周期训练-------------------------------- # 定义训练开始时间、最好验证准确度(用于保存最好的模型)、统计训练步骤总数 start_time =", "num_step = 0 # 开始周期训练 for epoch in range(epochs): # 设定每周期开始时间点、周期信息 epoch_start_time =", "Adam(params=net.parameters(), weight_decay=weight_decay) # ----------------------进行网络的训练------------------------------------ print('进行训练....') # 获得记录日志信息的写入器 writer = SummaryWriter(log_dir) # ------------------定义训练、验证子函数-------------------- #", "# 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples", "train_acc:{:.4f}\\n valid_loss:{:.4f}, valid_acc:{:.4f}'. 
format(train_loss, train_acc, valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc':", "global_step=index) # 单个标签 writer.add_scalars(main_tag='Train(batch)', tag_scalar_dict={'batch_loss': loss, 'batch_accuracy': acc / images.size(0)}, global_step=num_step) # 更新全局步骤", "进行网络的训练 for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 images, labels = data", "torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc +=", "num_classes = 10 if os.path.exists(log_dir) is False: os.mkdir(log_dir) # 检查设备情况 device = torch.device('cuda:0'", "criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc", "- 1)) print('-' * 20) # 训练 train_acc, train_loss, num_step = _train(train_loader, num_step)", "# root_dataset = 'Dataset/' # root_project = '' # root_data_save = 'data_save/' #", "'/home/team/xiaonan/experients/' root_data_save = '/home/team/xiaonan/data_save/' # 数据集根目录、项目根目录、训练数据保存目录(本机) # self.root_dataset = '/home/xiaonan/Dataset/' # self.root_project =", "pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred", "训练子函数 def _train(train_loader, num_step): print(' training stage....') # 将网络结构调成训练模式;初始化梯度张量 net.train() optimizer.zero_grad() # 定义准确率变量,损失值,批次数量,样本总数量", "from torch.utils.tensorboard import SummaryWriter import torch.nn.functional as F import os import time #", "= root_data_save + 'checkpoints/' log_dir = root_data_save + 'log/' # 若文件夹不存在,则创建 if os.path.exists(root_data_save)", "loss.backward() optimizer.step() optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 30 == 0:", "# 重写网络的最后一层 fc_in_features = net.fc.in_features # 网络最后一层的输入通道 print(fc_in_features) net.fc = nn.Linear(in_features=fc_in_features, out_features=num_classes) print(net)", 
"valid_loss, valid_acc)) # 记录测试结果 writer.add_scalars(main_tag='Train(epoch)', tag_scalar_dict={'train_loss': train_loss, 'train_acc': train_acc, 'valid_loss': valid_loss, 'valid_acc': valid_acc},", "num_samples = 0 # 进行网络的训练 for index, data in enumerate(train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中", "= 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for images,", "num_samples += images.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss", "if os.path.exists(log_dir) is False: os.mkdir(log_dir) # cifar-10数据集目录;模型名称;类别数量 cifar_10_dir = root_dataset + 'cifar-10/' model_dir", "valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 #", "= torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc", "std = [0.229, 0.224, 0.255] # -----------------------------读取数据集-------------------------------- # 训练集、验证集、测试集预处理 train_data_preprocess = transforms.Compose([transforms.Resize(size=(224, 224)),", "+ 1) % 30 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\\n'. format(index," ]
[ "await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!')", "else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or", "ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not", "async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms", "discord import app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot)", "bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) -> None: if", "except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the", "`{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`',", "author is bot owner if await perms(interaction, 'bot_owner'): # run perms result =", "if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception", "await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else:", "description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user):", "try: result = eval(code) await 
interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await", "as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of", "discord.User) -> None: from src.perms import perms # check if author is bot", "owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async", "permissions of the user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User)", "perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are", "from discord import app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self, bot:", "if await perms(interaction, 'bot_owner'): # run perms result = await perms(interaction, permissions, user)", "'bot_owner'): # run perms result = await perms(interaction, permissions, user) # send result", "None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog)", "check if author is bot owner if await perms(interaction, 'bot_owner'): # run perms", "= await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await", "ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async def load(self, interaction: discord.Interaction, cog:", "user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from", "str) -> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}',", "-> None: from src.perms import 
perms # check if author is bot owner", "result = await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else:", "await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await", "if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await", "else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the", "check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms import perms", "discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms import perms # check", "perms # check if author is bot owner if await perms(interaction, 'bot_owner'): #", "app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None:", "reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...',", "interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a", "bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction:", "str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading", "await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You", "`{cog}`...', ephemeral=True) 
await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await", "__init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self,", "result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`',", "import perms # check if author is bot owner if await perms(interaction, 'bot_owner'):", "are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the", "a cog') async def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None:", "async def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None: if await", "@app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) -> None: if await", "interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await", "commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval',", "await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions", "await perms(interaction, 'bot_owner'): # run perms result = await perms(interaction, permissions, user) #", "None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str)", "interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await 
self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except", "class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate')", "self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await", "run perms result = await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}',", "e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.',", "permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not", "-> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True)", "perms result = await perms(interaction, permissions, user) # send result await interaction.response.send_message(f'{result}', ephemeral=True)", "None: from src.perms import perms # check if author is bot owner if", "import discord from discord import app_commands from discord.ext import commands class owner(commands.Cog): def", "user) # send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the", "@app_commands.command(name='check_perms', description='check the permissions of the user') async def check_perms(self, interaction: discord.Interaction, permissions:", "await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded", "owner of me.', 
ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async def load(self,", "if author is bot owner if await perms(interaction, 'bot_owner'): # run perms result", "commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction,", "me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user') async def check_perms(self, interaction:", "ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load", "ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True)", "self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded", "-> None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await", "not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user')", "or reload a cog') async def load(self, interaction: discord.Interaction, cog: str, reload: bool)", "Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner", "if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await", "# run perms result = await perms(interaction, permissions, user) # send 
result await", "await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await", "reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading", "interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms',", "await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload", "@app_commands.command(name='load', description='load or reload a cog') async def load(self, interaction: discord.Interaction, cog: str,", "# send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner", "ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user') async def check_perms(self, interaction: discord.Interaction,", "None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except", "interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are", "interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!')", "{result}', ephemeral=True) except 
Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are", "discord from discord import app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self,", "await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You", "is bot owner if await perms(interaction, 'bot_owner'): # run perms result = await", "self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) ->", "ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog)", "await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...',", "not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async", "permissions: str, user: discord.User) -> None: from src.perms import perms # check if", "owner if await perms(interaction, 'bot_owner'): # run perms result = await perms(interaction, permissions,", "owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user') async def", "from src.perms import perms # check if author is bot owner if await", "`{cog}`!') except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not", "me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async 
def load(self, interaction: discord.Interaction,", "await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as e:", "interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms import perms #", "interaction: discord.Interaction, cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if", "else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception as", "perms(interaction, 'bot_owner'): # run perms result = await perms(interaction, permissions, user) # send", "str, user: discord.User) -> None: from src.perms import perms # check if author", "await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)", "interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='load',", "await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True)", "are not the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog')", "code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'>", "= eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True)", "ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) 
@app_commands.command(name='check_perms', description='check", "# check if author is bot owner if await perms(interaction, 'bot_owner'): # run", "import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot", "await interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as", "interaction.response.send_message(f'You are not the owner of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of", "interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code)", "-> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code:", "bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True)", "the user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None:", "of me.', ephemeral=True) @app_commands.command(name='check_perms', description='check the permissions of the user') async def check_perms(self,", "the owner of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async def", "of me.', ephemeral=True) @app_commands.command(name='load', description='load or reload a cog') async def load(self, interaction:", "cog') async def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None: if", "send result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of", "def load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user):", "= bot 
@app_commands.command(name='eval', description='evaluate') async def eval(self, interaction: discord.Interaction, code: str) -> None:", "`{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True) await self.bot.load_extension(cog) await interaction.edit_original_message(content=f'Loaded `{cog}`!') except Exception", "async def eval(self, interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try:", "reload a cog') async def load(self, interaction: discord.Interaction, cog: str, reload: bool) ->", "def eval(self, interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result", "src.perms import perms # check if author is bot owner if await perms(interaction,", "discord.Interaction, cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload:", "discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result = eval(code) await", "result await interaction.response.send_message(f'{result}', ephemeral=True) else: await interaction.response.send_message(f'You are not the owner of me.',", "import app_commands from discord.ext import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) ->", "of the user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) ->", "interaction.client.is_owner(interaction.user): try: result = eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e:", "cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try: if reload: await", "user: discord.User) -> None: from src.perms import perms # check if author is", "description='load or reload a cog') async def load(self, interaction: discord.Interaction, cog: str, reload:", "from discord.ext import commands class 
owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot", "load(self, interaction: discord.Interaction, cog: str, reload: bool) -> None: if await interaction.client.is_owner(interaction.user): try:", "eval(self, interaction: discord.Interaction, code: str) -> None: if await interaction.client.is_owner(interaction.user): try: result =", "try: if reload: await interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else:", "interaction.response.send_message(f'Reloading `{cog}`...', ephemeral=True) await self.bot.reload_extension(cog) await interaction.edit_original_message(content=f'Reloaded `{cog}`!') else: await interaction.response.send_message(f'Loading `{cog}`...', ephemeral=True)", "the permissions of the user') async def check_perms(self, interaction: discord.Interaction, permissions: str, user:", "eval(code) await interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else:", "def __init__(self, bot: commands.Bot) -> None: self.bot = bot @app_commands.command(name='eval', description='evaluate') async def", "def check_perms(self, interaction: discord.Interaction, permissions: str, user: discord.User) -> None: from src.perms import", "discord.ext import commands class owner(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot =", "description='check the permissions of the user') async def check_perms(self, interaction: discord.Interaction, permissions: str,", "interaction.response.send_message(f'> {result}', ephemeral=True) except Exception as e: await interaction.response.send_message(f'`{e}`', ephemeral=True) else: await interaction.response.send_message(f'You", "bot owner if await perms(interaction, 'bot_owner'): # run perms result = await perms(interaction,", "the owner of me.', ephemeral=True) 
@app_commands.command(name='check_perms', description='check the permissions of the user') async" ]
[ "will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A", "self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\",", "hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer", "instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\"", "with 0 or more galaxies in its tree Returns ------- instance The input", "= self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane(", "): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths:", "\"\"\" Determine the fit of a lens galaxy and source galaxy to the", "save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization)", "af.DirectoryPaths, instance, during_analysis): instance = 
self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise", "InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions as inv from autofit.exc", "None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image", ") def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer(", "FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging(", "self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self,", "Determine the fit of a lens galaxy and source galaxy to the interferometer", "class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result,", "galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance", "dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self):", "from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions", "set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = 
result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance,", "galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self,", "super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15,", "@property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of", "return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance:", "but not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0", "is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self,", "def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples,", "af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):", "): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property", "-> af.ModelInstance: \"\"\" Takes images from the last result, if there is one,", "plane = self.plane_for_instance(instance=instance) hyper_background_noise = 
self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise", "associates them with galaxies in this search where full-path galaxy names match. If", ") visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None,", "return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens galaxy", "-> af.ModelInstance: \"\"\" Takes visibilities from the last result, if there is one,", "not None: if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict", "def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings,", "__init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset =", ") fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit,", ") if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit,", "if self.hyper_result is not None: if hyper_result.search is not 
None: hyper_result.search.paths = None", "self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion", "self.dataset = dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine", "cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is", "self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer)", "self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self,", "A fractional value indicating how well this model fit and the model imaging", "instance with attributes Returns ------- fit : Fit A fractional value indicating how", "None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result):", "return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e def", "during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane,", 
"use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance)", "return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the", "not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0 or", ") def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def", "fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def", "AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset,", "None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads", "hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion,", "\"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ):", "plane=plane, 
hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):", "= self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit", "as pix, inversions as inv from autofit.exc import FitException from autogalaxy.analysis import result", "self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if", "interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try:", "as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology =", "= self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e:", "visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None:", ") def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging(", "af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self,", 
"instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[", "this model fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance)", "this search where full-path galaxy names match. If the galaxy collection has a", "def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result,", "inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane(", "the fit of a lens galaxy and source galaxy to the imaging in", "inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane(", "imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance(", "autoarray import preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion", ") try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException,", 
"self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities", "settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict =", "instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self,", "------- fit : Fit A fractional value indicating how well this model fit", "vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis )", "galaxy and source galaxy to the interferometer in this lens. Parameters ---------- instance", "= None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization", "lens galaxy and source galaxy to the interferometer in this lens. 
Parameters ----------", "from autogalaxy.analysis import result as res from autogalaxy.analysis import visualizer as vis from", "\"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(),", "result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self,", "from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities", "import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions as inv", ") def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance)", ") def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise =", "in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def", "during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance(", "plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, 
during_analysis): self.associate_hyper_images(instance=instance)", "self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane,", "self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path)", "fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException)", "settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result", "samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\",", "= self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance )", "self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities", "self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = 
self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer", "self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion,", "visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion,", "source galaxy to the interferometer in this lens. Parameters ---------- instance A model", "def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies)", "visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as", "return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance):", "def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the last result,", "samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search", "settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) 
self.dataset", "__init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__(", "hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self): return", "plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException", "images from the last result, if there is one, and associates them with", "af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology)", "samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"uv_wavelengths\", self.dataset.uv_wavelengths) paths.save_object(\"real_space_mask\",", "self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is", "__init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology,", "galaxies where possible. 
\"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in", "the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance", "itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit", "vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion,", "self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane(", "------- instance The input instance with images associated with galaxies where possible. \"\"\"", "Returns ------- fit : Fit A fractional value indicating how well this model", "with galaxies in this search where full-path galaxy names match. 
If the galaxy", "paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\",", "settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self): return self.dataset def log_likelihood_function(self,", "def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property", "value indicating how well this model fit and the model interferometer itself \"\"\"", "paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(),", "self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane(", "af from autoarray import preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException", "hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result", "plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, 
samples:", "fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths,", "plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise", "the imaging in this lens. Parameters ---------- instance A model instance with attributes", "visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance", "if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return", "plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is", "an association is not made. e.g. galaxies.lens will match with: galaxies.lens but not", "the interferometer in this lens. 
Parameters ---------- instance A model instance with attributes", "hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane", "g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path", "is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image =", "save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self,", "of a lens galaxy and source galaxy to the interferometer in this lens.", "galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True", "plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization,", "hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization =", "import pixelizations as pix, inversions as inv from autofit.exc import FitException from autogalaxy.analysis", "vis from autogalaxy.fit import 
fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from", "hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def", "galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance", "= self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path)", "in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities =", "self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance):", "try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException,", "self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit", "self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise )", "af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: 
af.DirectoryPaths):", "hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self,", "instance): \"\"\" Determine the fit of a lens galaxy and source galaxy to", "from astropy import cosmology as cosmo import autofit as af from autoarray import", "self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path", "galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer(", "self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane,", "hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer(", "None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15,", "self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result(", "\"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self,", "= settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): 
self.hyper_galaxy_image_path_dict =", "is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if", "if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance:", "imaging in this lens. Parameters ---------- instance A model instance with attributes Returns", "GridException) as e: raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise,", "import FitException from autogalaxy.analysis import result as res from autogalaxy.analysis import visualizer as", "visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\"", "galaxy collection has a different name then an association is not made. e.g.", "self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self,", "result, if there is one, and associates them with galaxies in this search", "during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane(", "as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g", "images associated with galaxies where possible. 
\"\"\" if self.hyper_galaxy_image_path_dict is not None: for", "class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(", "fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as", "return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane,", ") return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e", "settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if", "use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model:", "hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads,", "associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from the last", "instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) 
hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise =", "self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e:", "preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities", "A model instance with attributes Returns ------- fit : Fit A fractional value", "is not made. e.g. galaxies.lens will match with: galaxies.lens but not with: galaxies.lens", "def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance(", "with galaxies where possible. \"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy", "in its tree Returns ------- instance The input instance with images associated with", "self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(),", "not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result):", "is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict", "af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search )", "not None: for 
galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict:", "= None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict", "galaxies where possible. \"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in", "galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0 or more galaxies", "galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities", "fit : Fit A fractional value indicating how well this model fit and", "settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None:", "preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset", "them with galaxies in this search where full-path galaxy names match. If the", "visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,", "where possible. 
\"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class(", "e: raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance:", "\"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ):", "self.hyper_result is not None: if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result)", "self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads", "plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit", "self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = (", "def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__(", "paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if", "during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, 
hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane,", "galaxy to the interferometer in this lens. Parameters ---------- instance A model instance", "association is not made. e.g. galaxies.lens will match with: galaxies.lens but not with:", "samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search", "self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self,", "settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not", "visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images(", "self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image", "more galaxies in its tree Returns ------- instance The input instance with images", "not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict )", "galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: 
af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\",", "hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples,", "self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return", "search where full-path galaxy names match. If the galaxy collection has a different", "instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion)", "if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane,", "= self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result(", "self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def", "FitException from autogalaxy.analysis import result as res from autogalaxy.analysis import visualizer as vis", "settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self): return self.dataset def", ") 
visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection,", "instance with visibilities associated with galaxies where possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict is not", "): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths)", "hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False )", "if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__(", "visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis )", "= preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self,", "the fit of a lens galaxy and source galaxy to the interferometer in", "pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix,", "and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance(", "paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", 
self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset,", "def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance,", "import galaxy as g from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def", "make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model,", "import preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import", "in this lens. Parameters ---------- instance A model instance with attributes Returns -------", "settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image", "paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None:", "instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer(", "\"\"\" Takes visibilities from the last result, if there is one, and associates", "self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ):", "autogalaxy.analysis import result as res from 
autogalaxy.analysis import visualizer as vis from autogalaxy.fit", "instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise,", "as g from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None,", ") if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities =", "import autofit as af from autoarray import preloads as pload from autoarray.exc import", "settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance)", "for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image =", "AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology)", "(PixelizationException, InversionException, GridException) as e: raise FitException from e def fit_imaging_for_plane( self, plane,", "if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if", "well this model fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane =", "= self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, 
InversionException, GridException) as", "instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance", "self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self,", "the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise", "self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(),", "e: raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ):", "there is one, and associates them with galaxies in this search where full-path", "with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with 0 or more", "instance A model instance with attributes Returns ------- fit : Fit A fractional", ": Fit A fractional value indicating how well this model fit and the", "hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False,", "not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if 
visualizer.plot_fit_no_hyper:", "dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result)", "galaxies in this search where full-path galaxy names match. If the galaxy collection", "from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane as pl class", "= None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads =", "paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is", "model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"uv_wavelengths\", self.dataset.uv_wavelengths) paths.save_object(\"real_space_mask\", self.dataset.real_space_mask)", "fit of a lens galaxy and source galaxy to the imaging in this", "= cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(),", "self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map)", "---------- instance A model instance with 0 or more galaxies in its tree", "] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): 
paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings)", "else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities =", "self.dataset = dataset if self.hyper_result is not None: if hyper_result.search is not None:", "how well this model fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane", "not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None", "= settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image =", "paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance )", "autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions as", "instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return", "Fit A fractional value indicating how well this model fit and the model", "to the interferometer in this lens. 
Parameters ---------- instance A model instance with", "model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def", "= self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ):", "af.ModelInstance: \"\"\" Takes images from the last result, if there is one, and", "and associates them with galaxies in this search where full-path galaxy names match.", "The input instance with visibilities associated with galaxies where possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict", "dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion,", "with attributes Returns ------- fit : Fit A fractional value indicating how well", "Takes visibilities from the last result, if there is one, and associates them", "def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if", "result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def", "hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else:", "visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def 
make_result( self, samples: af.PDFSamples, model: af.Collection, search:", "import cosmology as cosmo import autofit as af from autoarray import preloads as", "from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy", "result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the", "from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset,", "interferometer in this lens. Parameters ---------- instance A model instance with attributes Returns", "instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return", "= None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict", "GridException from autoarray.inversion import pixelizations as pix, inversions as inv from autofit.exc import", "the last result, if there is one, and associates them with galaxies in", "AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset,", "model instance with 0 or more galaxies in its tree Returns ------- instance", "super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return", "last result, if there is 
one, and associates them with galaxies in this", "= hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15,", "return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\",", ") visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection,", "cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset,", "imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens", "hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def", ") -> af.ModelInstance: \"\"\" Takes visibilities from the last result, if there is", "a lens galaxy and source galaxy to the imaging in this lens. 
Parameters", "= self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane,", "self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try:", "instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from", "is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in", "galaxies in its tree Returns ------- instance The input instance with visibilities associated", "af.ModelInstance: \"\"\" Takes visibilities from the last result, if there is one, and", "then an association is not made. e.g. galaxies.lens will match with: galaxies.lens but", "hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, )", "as res from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer", "self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset", "astropy import cosmology as cosmo import autofit as af from autoarray import preloads", "plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit,", 
"visibilities from the last result, if there is one, and associates them with", "visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False", "from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane", "af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit", "hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image", "self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\",", "None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit", "model fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise", "cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict", "and source galaxy to the imaging in this lens. 
Parameters ---------- instance A", "if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis,", "pix, inversions as inv from autofit.exc import FitException from autogalaxy.analysis import result as", "self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None:", "raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return", "raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\"", "= self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise", "instance The input instance with images associated with galaxies where possible. 
\"\"\" if", "interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens", "super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None:", "cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def imaging(self): return self.dataset", "this model fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance)", "attributes Returns ------- fit : Fit A fractional value indicating how well this", "value indicating how well this model fit and the model imaging itself \"\"\"", "hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from", "paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset):", "if there is one, and associates them with galaxies in this search where", "A fractional value indicating how well this model fit and the model interferometer", "during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict,", "af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search )", "well this model fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane =", 
"settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset =", "how well this model fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane", "\"\"\" Takes images from the last result, if there is one, and associates", "itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance", "e.g. galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ----------", "indicating how well this model fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance)", "result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"):", "fractional value indicating how well this model fit and the model interferometer itself", "except (PixelizationException, InversionException, GridException) as e: raise FitException from e def associate_hyper_visibilities( self,", "hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def", "= self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) 
visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis )", "source galaxy to the imaging in this lens. Parameters ---------- instance A model", "self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict", "self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization,", "self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not", "hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from", "self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\",", "visibilities associated with galaxies where possible. 
\"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for", "model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise =", "preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations", "= dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the", "hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples,", "model instance with attributes Returns ------- fit : Fit A fractional value indicating", "self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise,", ") @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit", "Parameters ---------- instance A model instance with 0 or more galaxies in its", "hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, )", "g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path", "fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e def associate_hyper_visibilities(", "or more galaxies in its tree Returns ------- instance The input instance with", "if self.hyper_model_image is not None: 
paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object(", "\"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) ->", "visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis", "cosmo import autofit as af from autoarray import preloads as pload from autoarray.exc", "def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a", "preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is", "fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch", "super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None: if hyper_result.search is", "paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(),", "use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane =", "from autofit.exc import 
FitException from autogalaxy.analysis import result as res from autogalaxy.analysis import", "visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" )", "galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image", "not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict:", "): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None: if hyper_result.search", "GridException) as e: raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance )", "is one, and associates them with galaxies in this search where full-path galaxy", "): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self,", "preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance):", "self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens galaxy and", "input instance with images associated with galaxies where possible. 
\"\"\" if self.hyper_galaxy_image_path_dict is", "plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images", "instance with images associated with galaxies where possible. \"\"\" if self.hyper_galaxy_image_path_dict is not", "indicating how well this model fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance)", "in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image =", "Returns ------- instance The input instance with visibilities associated with galaxies where possible.", "autofit as af from autoarray import preloads as pload from autoarray.exc import PixelizationException,", "is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset,", "dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit", "= self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\",", "hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException", "None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result)", "use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, 
use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self,", "@property def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of", "possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy", "hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky,", "Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis):", "(PixelizationException, InversionException, GridException) as e: raise FitException from e def associate_hyper_visibilities( self, instance:", "make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model,", "return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths,", "self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise", "return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths:", 
") visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None,", "= result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky", "inversions as inv from autofit.exc import FitException from autogalaxy.analysis import result as res", "as inv from autofit.exc import FitException from autogalaxy.analysis import result as res from", "def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky,", "self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except", "self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self,", "if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None", "preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None: if", "galaxies in its tree Returns ------- instance The input instance with images associated", "fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, 
hyper_model_image=self.hyper_model_image, plane=plane, )", "search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths:", "in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ] return instance def", "self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def", "galaxy as g from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self,", "self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image = result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return", "visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky =", "af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from the last result, if there", "lens galaxy and source galaxy to the imaging in this lens. 
Parameters ----------", "from autoarray import preloads as pload from autoarray.exc import PixelizationException, InversionException, GridException from", "return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\",", "self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit", "self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset", "plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, )", "plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit =", "fit and the model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise =", "with images associated with galaxies where possible. 
\"\"\" if self.hyper_galaxy_image_path_dict is not None:", "g from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15):", "paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict", "settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane =", "paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance)", "\"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance )", "if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise", "hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance", "= vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion(", "hyper_background_noise = 
self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, )", ") if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis,", "this lens. Parameters ---------- instance A model instance with attributes Returns ------- fit", "associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the last result, if", "instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[", "search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset):", "cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ):", "pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology", "------- instance The input instance with visibilities associated with galaxies where possible. 
\"\"\"", "): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths)", "\"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit =", "pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the last", "None: if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict =", "def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from the", "with visibilities associated with galaxies where possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict is not None:", "fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane as pl", "a lens galaxy and source galaxy to the interferometer in this lens. Parameters", "for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities =", "import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane", "self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise,", "dataset if self.hyper_result is not None: if hyper_result.search is not None: hyper_result.search.paths =", "name then an association is not made. e.g. 
galaxies.lens will match with: galaxies.lens", "hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer =", "its tree Returns ------- instance The input instance with images associated with galaxies", "in its tree Returns ------- instance The input instance with visibilities associated with", "fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance,", "galaxy names match. If the galaxy collection has a different name then an", "result as res from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging,", "self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict self.hyper_model_image", "e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from", "instance The input instance with visibilities associated with galaxies where possible. 
\"\"\" if", "class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class", "def log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens galaxy and source", "plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane,", "visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search:", ") visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion(", "def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine the fit of a", "collection has a different name then an association is not made. e.g. 
galaxies.lens", "def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data) paths.save_object(\"noise_map\", self.dataset.noise_map) paths.save_object(\"settings_dataset\", self.dataset.settings) paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\",", "def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def", "A model instance with 0 or more galaxies in its tree Returns -------", "model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask)", "and source galaxy to the interferometer in this lens. Parameters ---------- instance A", "visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit =", "0 or more galaxies in its tree Returns ------- instance The input instance", ") self.dataset = dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance): \"\"\"", "= self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return", "search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths:", "if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if", "with galaxies where 
possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy", "= self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths):", "as pload from autoarray.exc import PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as", "instance A model instance with 0 or more galaxies in its tree Returns", "autoarray.inversion import pixelizations as pix, inversions as inv from autofit.exc import FitException from", "its tree Returns ------- instance The input instance with visibilities associated with galaxies", "settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None", "tree Returns ------- instance The input instance with visibilities associated with galaxies where", "import result as res from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import", "in this search where full-path galaxy names match. If the galaxy collection has", "Returns ------- instance The input instance with images associated with galaxies where possible.", "settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise", "possible. \"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy", "tree Returns ------- instance The input instance with images associated with galaxies where", "input instance with visibilities associated with galaxies where possible. 
\"\"\" if self.hyper_galaxy_visibilities_path_dict is", "return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"uv_wavelengths\",", "plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging(", "with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance", "the galaxy collection has a different name then an association is not made.", "instance, during_analysis): self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit =", ") fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit,", "autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane as pl class Analysis(af.Analysis):", "= self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky,", "galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): paths.save_object(\"data\", self.dataset.data)", "= self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, 
hyper_background_noise=hyper_background_noise,", "result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if", "fit of a lens galaxy and source galaxy to the interferometer in this", "fit.figure_of_merit except (PixelizationException, InversionException, GridException) as e: raise FitException from e def fit_imaging_for_plane(", "self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane(", "fit=fit, during_analysis=during_analysis ) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images(", "settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane", "cosmology as cosmo import autofit as af from autoarray import preloads as pload", "instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException,", "class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(", "lens. 
Parameters ---------- instance A model instance with attributes Returns ------- fit :", "af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the last result, if there is", "instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) return fit.figure_of_merit except", "a different name then an association is not made. e.g. galaxies.lens will match", "galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance", "to the imaging in this lens. Parameters ---------- instance A model instance with", "self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from the last result,", "self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path", "The input instance with images associated with galaxies where possible. \"\"\" if self.hyper_galaxy_image_path_dict", "self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if", "res from autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from", "fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization,", "not made. e.g. 
galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source", "imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance,", "paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not", "Parameters ---------- instance A model instance with attributes Returns ------- fit : Fit", "except (PixelizationException, InversionException, GridException) as e: raise FitException from e def fit_imaging_for_plane( self,", "self.hyper_result = hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None,", "hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not", "Determine the fit of a lens galaxy and source galaxy to the imaging", "hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None:", "subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return", "plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, 
during_analysis=during_analysis ) if fit.inversion", "fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import plane as", "af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self,", "of a lens galaxy and source galaxy to the imaging in this lens.", "= self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit =", "def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes", "None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion", "else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion", "one, and associates them with galaxies in this search where full-path galaxy names", "if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None", "match. 
If the galaxy collection has a different name then an association is", "If the galaxy collection has a different name then an association is not", "instance with 0 or more galaxies in its tree Returns ------- instance The", "self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings,", "paths.save_object(\"settings_inversion\", self.settings_inversion) paths.save_object(\"settings_pixelization\", self.settings_pixelization) paths.save_object(\"cosmology\", self.cosmology) if self.hyper_model_image is not None: paths.save_object(\"hyper_model_image\", self.hyper_model_image)", "def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ): return res.ResultImaging( samples=samples,", "fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky =", "dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset = dataset if", "( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\" Determine", "self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization = settings_pixelization self.settings_inversion =", "where possible. 
\"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class(", "hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance =", "autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result =", "hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance): if hasattr(instance, \"hyper_background_noise\"):", "galaxy and source galaxy to the imaging in this lens. Parameters ---------- instance", ") class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ):", "from autoarray.inversion import pixelizations as pix, inversions as inv from autofit.exc import FitException", "inv from autofit.exc import FitException from autogalaxy.analysis import result as res from autogalaxy.analysis", "FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes", "Takes images from the last result, if there is one, and associates them", "None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_image_path_dict = None self.hyper_model_image = None self.settings_pixelization", "= vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, 
during_analysis=during_analysis", "is not None: if hyper_result.search is not None: hyper_result.search.paths = None self.set_hyper_dataset(result=self.hyper_result) else:", "model fit and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky", "galaxy to the imaging in this lens. Parameters ---------- instance A model instance", "as e: raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True", "PixelizationException, InversionException, GridException from autoarray.inversion import pixelizations as pix, inversions as inv from", "instance: af.ModelInstance) -> af.ModelInstance: \"\"\" Takes images from the last result, if there", "if visualizer.plot_fit_no_hyper: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=None, use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\"", "): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ]", "plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self,", "= self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise )", "def visualize(self, paths: af.DirectoryPaths, instance, during_analysis): instance = self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky", "more galaxies in its tree Returns ------- instance The input instance with 
visibilities", "None: paths.save_object(\"hyper_model_image\", self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class", "hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples:", "visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image, plane=plane, ) if visualizer.plot_fit_no_hyper: fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None,", "= result.hyper_model_image def hyper_image_sky_for_instance(self, instance): if hasattr(instance, \"hyper_image_sky\"): return instance.hyper_image_sky def hyper_background_noise_for_instance(self, instance):", "associated with galaxies where possible. \"\"\" if self.hyper_galaxy_image_path_dict is not None: for galaxy_path,", "hasattr(instance, \"hyper_background_noise\"): return instance.hyper_background_noise def plane_for_instance(self, instance): return pl.Plane(galaxies=instance.galaxies) def associate_hyper_images(self, instance: af.ModelInstance)", "res.ResultImaging( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized)", ") try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException,", "= self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer =", 
"during_analysis=during_analysis) if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image,", "self.hyper_model_image) if self.hyper_galaxy_image_path_dict is not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def", ") visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is not", "af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None,", "paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(),", "and the model imaging itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance)", ") if fit.inversion is not None: visualizer.visualize_inversion( inversion=fit.inversion, during_analysis=during_analysis ) visualizer.visualize_hyper_images( hyper_galaxy_image_path_dict=self.hyper_galaxy_image_path_dict, hyper_model_image=self.hyper_model_image,", "where full-path galaxy names match. 
If the galaxy collection has a different name", "interferometer=self.dataset, plane=plane, hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def visualize(self, paths: af.DirectoryPaths, instance, during_analysis):", "instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging)", "different name then an association is not made. e.g. galaxies.lens will match with:", "InversionException, GridException) as e: raise FitException from e def fit_imaging_for_plane( self, plane, hyper_image_sky,", "None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict )", "InversionException, GridException) as e: raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance", "from autogalaxy.plane import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result", "instance: af.ModelInstance ) -> af.ModelInstance: \"\"\" Takes visibilities from the last result, if", "autofit.exc import FitException from autogalaxy.analysis import result as res from autogalaxy.analysis import visualizer", "galaxies.source Parameters ---------- instance A model instance with 0 or more galaxies in", "= result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def", "self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class AnalysisInterferometer(AnalysisDataset): def 
__init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(),", "self.hyper_result is not None: self.set_hyper_dataset(result=self.hyper_result) else: self.hyper_galaxy_visibilities_path_dict = None self.hyper_model_visibilities = None def", "None self.hyper_model_visibilities = None def set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict =", "associated with galaxies where possible. \"\"\" if self.hyper_galaxy_visibilities_path_dict is not None: for galaxy_path,", "pixelizations as pix, inversions as inv from autofit.exc import FitException from autogalaxy.analysis import", "set_hyper_dataset(self, result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def", "model interferometer itself \"\"\" self.associate_hyper_images(instance=instance) plane = self.plane_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance )", "full-path galaxy names match. If the galaxy collection has a different name then", "def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__(hyper_result=hyper_result, cosmology=cosmology) self.dataset", "try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return fit.figure_of_merit except (PixelizationException, InversionException, GridException)", "names match. 
If the galaxy collection has a different name then an association", "galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_image_path_dict: galaxy.hyper_model_image = self.hyper_model_image", "import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy", "self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, )", "= ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self): return self.dataset def log_likelihood_function(self, instance): \"\"\"", "hyper_result self.cosmology = cosmology class AnalysisDataset(Analysis): def __init__( self, dataset, hyper_result=None, cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(),", "model: af.Collection, search: af.NonLinearSearch ): return res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def", "as cosmo import autofit as af from autoarray import preloads as pload from", "preloads=preloads, ) self.dataset = dataset @property def imaging(self): return self.dataset def log_likelihood_function(self, instance):", "super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) self.dataset = dataset @property def", "galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities", "fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, 
during_analysis=during_analysis, subfolders=\"fit_no_hyper\" )", "= None self.settings_pixelization = settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self,", "hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) return", "galaxy.hyper_model_image = self.hyper_model_image galaxy.hyper_galaxy_image = self.hyper_galaxy_image_path_dict[ galaxy_path ] return instance def save_attributes_for_aggregator(self, paths:", "cosmology=cosmo.Planck15, settings_pixelization=pix.SettingsPixelization(), settings_inversion=inv.SettingsInversion(), preloads=pload.Preloads(), ): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, )", "log_likelihood_function(self, instance): \"\"\" Determine the fit of a lens galaxy and source galaxy", "match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A model", "made. e.g. 
galaxies.lens will match with: galaxies.lens but not with: galaxies.lens galaxies.source Parameters", "plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result self.cosmology", "= dataset if self.hyper_result is not None: if hyper_result.search is not None: hyper_result.search.paths", "None: for galaxy_path, galaxy in instance.path_instance_tuples_for_class( g.Galaxy ): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities", "): if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ galaxy_path ]", "settings_pixelization self.settings_inversion = settings_inversion self.preloads = preloads def set_hyper_dataset(self, result): self.hyper_galaxy_image_path_dict = result.hyper_galaxy_image_path_dict", "): super().__init__( dataset=dataset, hyper_result=hyper_result, cosmology=cosmology, settings_pixelization=settings_pixelization, settings_inversion=settings_inversion, preloads=preloads, ) if self.hyper_result is not", "autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import galaxy as g from autogalaxy.plane import", "hyper_image_sky = self.hyper_image_sky_for_instance(instance=instance) hyper_background_noise = self.hyper_background_noise_for_instance( instance=instance ) try: fit = self.fit_imaging_for_plane( plane=plane,", "fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise,", "if galaxy_path in self.hyper_galaxy_visibilities_path_dict: galaxy.hyper_model_visibilities = self.hyper_model_visibilities galaxy.hyper_galaxy_visibilities = self.hyper_galaxy_visibilities_path_dict[ 
galaxy_path ] return", "import plane as pl class Analysis(af.Analysis): def __init__(self, hyper_result=None, cosmology=cosmo.Planck15): self.hyper_result = hyper_result", "galaxies.lens but not with: galaxies.lens galaxies.source Parameters ---------- instance A model instance with", "not None: paths.save_object( \"hyper_galaxy_image_path_dict\", self.hyper_galaxy_image_path_dict ) class AnalysisImaging(AnalysisDataset): def __init__( self, dataset, hyper_result=None,", "] return instance def fit_interferometer_for_plane( self, plane, hyper_background_noise, use_hyper_scalings=True ): return fit_interferometer.FitInterferometer( interferometer=self.dataset,", "during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model: af.Collection, search: af.NonLinearSearch ):", "e def fit_imaging_for_plane( self, plane, hyper_image_sky, hyper_background_noise, use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane,", "= self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis) if", "autogalaxy.analysis import visualizer as vis from autogalaxy.fit import fit_imaging, fit_interferometer from autogalaxy.galaxy import", "= self.fit_imaging_for_plane( plane=plane, hyper_image_sky=None, hyper_background_noise=None, use_hyper_scalings=False, ) visualizer.visualize_fit_imaging( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def", "---------- instance A model instance with attributes Returns ------- fit : Fit A", "from the last result, if there is one, and associates them with galaxies", "use_hyper_scalings=True ): return fit_imaging.FitImaging( imaging=self.dataset, plane=plane, hyper_image_sky=hyper_image_sky, 
hyper_background_noise=hyper_background_noise, use_hyper_scalings=use_hyper_scalings, settings_pixelization=self.settings_pixelization, settings_inversion=self.settings_inversion, ) def", "fit = self.fit_interferometer_for_plane( plane=plane, hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis", "analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"psf\", self.dataset.psf_unormalized) paths.save_object(\"mask\", self.dataset.mask) class", "as e: raise FitException from e def associate_hyper_visibilities( self, instance: af.ModelInstance ) ->", "res.ResultInterferometer( samples=samples, model=model, analysis=self, search=search ) def save_attributes_for_aggregator(self, paths: af.DirectoryPaths): super().save_attributes_for_aggregator(paths=paths) paths.save_object(\"uv_wavelengths\", self.dataset.uv_wavelengths)", "fit = self.fit_imaging_for_plane( plane=plane, hyper_image_sky=hyper_image_sky, hyper_background_noise=hyper_background_noise, ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_imaging(imaging=self.imaging) visualizer.visualize_fit_imaging(fit=fit, during_analysis=during_analysis)", "as af from autoarray import preloads as pload from autoarray.exc import PixelizationException, InversionException,", "has a different name then an association is not made. e.g. 
galaxies.lens will", "fractional value indicating how well this model fit and the model imaging itself", "use_hyper_scalings=False ) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis, subfolders=\"fit_no_hyper\" ) def make_result( self, samples: af.PDFSamples, model:", "hyper_background_noise=hyper_background_noise ) visualizer = vis.Visualizer(visualize_path=paths.image_path) visualizer.visualize_interferometer(interferometer=self.interferometer) visualizer.visualize_fit_interferometer( fit=fit, during_analysis=during_analysis ) if fit.inversion is", "result): super().set_hyper_dataset(result=result) self.hyper_model_visibilities = result.hyper_model_visibilities self.hyper_galaxy_visibilities_path_dict = ( result.hyper_galaxy_visibilities_path_dict ) @property def interferometer(self):", "cosmology=cosmology) self.dataset = dataset if self.hyper_result is not None: if hyper_result.search is not" ]
[ "w.load(index_path) w.on_gui_event += on_event return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict'])", "\"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w = setupElectron('index.html', on_js_event) w.run(on_update, 1000) main()", "setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event", "import sys, time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100, 100,", "print(msg) print(msg['testdict']) #TODO: this blocks main thread def on_update(w): #from multiprocessing import Process", "import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")');", "sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True)", "def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread def", "python import sys, time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100,", "#p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main():", "return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main", "def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread def on_update(w): #from multiprocessing", "#p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w = setupElectron('index.html',", "on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread def on_update(w): #from multiprocessing import", "w.on_gui_event += on_event 
return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO:", "#!/usr/bin/env python import sys, time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w =", "this blocks main thread def on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path))", "= pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w def", "#TODO: this blocks main thread def on_update(w): #from multiprocessing import Process #p =", "100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w def sleep(x): time.sleep(x)", "= Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w", "w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread", "<reponame>KostyaKow/PyWebKitGtk-wrapper<gh_stars>0 #!/usr/bin/env python import sys, time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w", "time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\",", "debug=True) w.load(index_path) w.on_gui_event += on_event return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg)", "sys, time sys.path.append('..') import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG", "def setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event +=", "def on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line =", "sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread def on_update(w):", "Process #p = 
Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def", "\"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w def sleep(x): time.sleep(x) def", "print(msg['testdict']) #TODO: this blocks main thread def on_update(w): #from multiprocessing import Process #p", "blocks main thread def on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start()", "on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return", "line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w = setupElectron('index.html', on_js_event) w.run(on_update,", "w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w", "on_event return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks", "on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\"", "Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w =", "print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w = setupElectron('index.html', on_js_event)", "multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline() print(line)", "import pwkg def setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path)", "main thread def on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\")", "pwkg def 
setupElectron(index_path, on_event): w = pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event", "pwkg.Window(100, 100, \"PWKG Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w def sleep(x):", "thread def on_update(w): #from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line", "= \"hi\" #sys.stdin.readline() print(line) w.exec_js('console.log(\"hi\")'); def main(): w = setupElectron('index.html', on_js_event) w.run(on_update, 1000)", "Window\", debug=True) w.load(index_path) w.on_gui_event += on_event return w def sleep(x): time.sleep(x) def on_js_event(msg):", "time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this blocks main thread def on_update(w): #from", "#from multiprocessing import Process #p = Process(target=lambda:adblib.screenshot(pic_path)) #p.start() print(\"yo\") line = \"hi\" #sys.stdin.readline()", "+= on_event return w def sleep(x): time.sleep(x) def on_js_event(msg): print(msg) print(msg['testdict']) #TODO: this" ]
[ "from .filters import * from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): #", "template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer}", "class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message =", "paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class =", "Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer':", "reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message", "CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model", "template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url", "\"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html'", "* from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/'", "CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer} return render(request, 'customer/customer_ledger.html', ctx)", "success_message = \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class =", "reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed successfully\" class 
CustomerDetailsView(OwnerDetailView): model = Customer", "template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed successfully\"", "class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer =", "import * from .filters import * from .forms import * # class CustomerIndexView(LoginRequiredMixin,", "class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message =", "reverse_lazy from customer.owner import * from .filters import * from .forms import *", "= Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was updated", "CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class", "template_name = 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class", "successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name", "CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session", "'/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10", "= 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html'", "was removed successfully\" class CustomerDetailsView(OwnerDetailView): 
model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request,", "import reverse_lazy from customer.owner import * from .filters import * from .forms import", "redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name =", "= 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView):", "Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was updated successfully.\"", "class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView):", "Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed", "10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message", "from django.shortcuts import render from django.urls import reverse_lazy from customer.owner import * from", "= reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html'", "was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name =", "CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10", "removed successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, 
pk):", ".filters import * from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url", "successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer", "from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' #", "= \"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name =", "login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by", "CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s", "* from .filters import * from .forms import * # class CustomerIndexView(LoginRequiredMixin, View):", "render from django.urls import reverse_lazy from customer.owner import * from .filters import *", "= \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer", "'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer} return render(request,", "= CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView):", "import render from django.urls import reverse_lazy from customer.owner import * from .filters import", "= CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView):", "import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name =", "= 'customer/customer_details.html' def CustomerLedgerView(request, 
pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer} return", "= 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html'", "form_class = CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class", "'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class", "CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s", "10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\")", "updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html'", "= '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by =", "%(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def", "# login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm", "was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset =", "= 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url =", "model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was", "queryset = Customer.objects.filter(is_deleted=False) 
template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model =", "success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name =", "paginate_by = 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url", "= Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer", "= 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url =", "customer.owner import * from .filters import * from .forms import * # class", "# redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name", "'redirect_to' class CustomerCreateView(OwnerCreateView): form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message", "form_class = CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was", "class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm success_message =", "reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by", "django.shortcuts import render from django.urls import reverse_lazy from customer.owner import * from .filters", "* # class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name = 'redirect_to'", "= 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') 
success_message = \"Session %(name)s was removed successfully\" class", "\"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset", "template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name =", "= Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx =", "django.urls import reverse_lazy from customer.owner import * from .filters import * from .forms", "= reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model =", "'customer/create_customer.html' success_message = \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class", "Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name", "\"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name", "= \"%(name)s was created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter", "success_message = \"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model = Customer template_name", "from django.urls import reverse_lazy from customer.owner import * from .filters import * from", "success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url =", "successfully.\" success_url = 
reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url", "import * from .forms import * # class CustomerIndexView(LoginRequiredMixin, View): # login_url =", "from customer.owner import * from .filters import * from .forms import * #", "= reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list')", "created successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False)", "= CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was created", "'customer/customer_edit.html' form_class = CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\")", "model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx", "CustomerCreateForm paginate_by = 10 template_name = 'customer/create_customer.html' success_message = \"%(name)s was created successfully.\"", "success_message = \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model =", "= Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was", "View): # login_url = '/login/' # redirect_field_name = 'redirect_to' class CustomerCreateView(OwnerCreateView): form_class =", "# class CustomerIndexView(LoginRequiredMixin, View): # login_url = '/login/' # redirect_field_name = 'redirect_to' class", ".forms import * # class CustomerIndexView(LoginRequiredMixin, View): 
# login_url = '/login/' # redirect_field_name", "class CustomerListView(OwnerListView): filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by =", "= 10 class CustomerUpdateView(OwnerUpdateView): model = Customer template_name = 'customer/customer_edit.html' form_class = CustomerUpdateForm", "filterset_class = CustomerFilter queryset = Customer.objects.filter(is_deleted=False) template_name = 'customer/customer_list.html' paginate_by = 10 class", "'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView):", "model = Customer template_name = 'customer/customer_delete.html' success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s", "def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk) ctx = {'customer': customer} return render(request, 'customer/customer_ledger.html',", "CustomerDetailsView(OwnerDetailView): model = Customer template_name = 'customer/customer_details.html' def CustomerLedgerView(request, pk): customer = Customer.objects.get(id=pk)", "CustomerUpdateForm success_message = \"%(name)s was updated successfully.\" success_url = reverse_lazy(\"customer:customer_list\") class CustomerDeleteView(OwnerDeleteView): model", "success_url = reverse_lazy('customer:customer_list') success_message = \"Session %(name)s was removed successfully\" class CustomerDetailsView(OwnerDetailView): model" ]
[ "\"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\": []", "= Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\": [],", "= footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {}", "finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"])", "import ElementNotInteractableException import pandas as pd import time class Test_1: base_url = siteconfig.getsiteURl()", "test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath", "n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text =", "df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output", "{} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url)", "import webdriver from selenium.webdriver.common.by import By from pageObjects.footer import Footer from utilities.customLogger import", "LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\") driver = setup", "siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\")", "import pandas as pd import time class Test_1: base_url = 
siteconfig.getsiteURl() base_xpath =", "footer links test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers =", "text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************", "+ 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output =", "base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"***************", "[], \"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\": [] } result =", "driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3)", "[], \"Validation\": [] } result = [] for n, i in enumerate(footer_xpath): footer", "\"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\": [] } result = []", "= driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException", "pandas as pd import time class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH()", "footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except", "df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not", "for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) 
footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text", "URL\": [], \"Validation\": [] } result = [] for n, i in enumerate(footer_xpath):", "pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result: assert", "pageObjects.footer import Footer from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions", "links test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver)", "df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if", "time class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def", "\"Directed URL\": [], \"Validation\": [] } result = [] for n, i in", "driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = {", "i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\")", "import siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas as pd import time class", "[], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\": [] }", "df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed", "webdriver from selenium.webdriver.common.by import By from pageObjects.footer import Footer from utilities.customLogger 
import LogGen", "logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\") driver", "footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\": [],", "Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup):", "pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result: assert True else: assert False", "footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e))", "base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test", "test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window()", "= [] for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href =", "{ \"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\":", "siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas as pd import time class Test_1:", "from pageObjects.footer import Footer from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from", "Footer from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException", "in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") 
try:", "ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\")", "**************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath)", "footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e:", "pd import time class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger =", "= driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n", "output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result: assert True else:", "index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result: assert True", "\"URL\": [], \"Directed URL\": [], \"Validation\": [] } result = [] for n,", "time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [],", "= footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {}", "df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save()", "as e: self.logger.info(\"**************** {} 
************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else:", "footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************", "= siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started", "selenium.webdriver.common.by import By from pageObjects.footer import Footer from utilities.customLogger import LogGen from utilities.siteConfig", "*********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data =", "ElementNotInteractableException import pandas as pd import time class Test_1: base_url = siteconfig.getsiteURl() base_xpath", "from selenium import webdriver from selenium.webdriver.common.by import By from pageObjects.footer import Footer from", "self.logger.info(\"*************** footer links test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers", "[], \"Directed URL\": [], \"Validation\": [] } result = [] for n, i", "driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n +", "1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\")", "LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import 
ElementNotInteractableException import pandas as pd", "{} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\")", "Data = pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in", "except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\")", "time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1)", "import By from pageObjects.footer import Footer from utilities.customLogger import LogGen from utilities.siteConfig import", "= pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result: assert True else: assert", "= pd.DataFrame(df, index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\" not in result:", "self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url =", "selenium.common.exceptions import ElementNotInteractableException import pandas as pd import time class Test_1: base_url =", "from selenium.webdriver.common.by import By from pageObjects.footer import Footer from utilities.customLogger import LogGen from", "class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self,", "current_url = driver.current_url 
time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally:", "def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\") driver = setup driver.get(self.base_url)", "self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back()", "href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"****************", "driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as", "footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\":", "Data.to_excel(output) output.save() if \"failed\" not in result: assert True else: assert False driver.close()", "passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data", "*********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer", "from utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas as pd import", "df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, index=df[\"S.no\"]) output = 
pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output)", "= footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\":", "} result = [] for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view", "enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\") text = footer.get_attribute(\"text\") try: footer.click()", "[] } result = [] for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i)", "result = [] for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href", "result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df,", "try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed", "utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas as pd import time", "************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1)", "self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed", "footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text) df[\"URL\"].append(href) df[\"Directed URL\"].append(current_url) df[\"XPATH\"].append(i)", "df = { \"S.no\": 
[], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\":", "driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\":", "{} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url", "setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df =", "footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text))", "siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links", "\"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\": [], \"Validation\": [] } result", "failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {}", "= setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df", "e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer failed *********\".format(text)) df[\"Validation\"].append(\"failed\") result.append(\"failed\") else: current_url", "\"Validation\": [] } result = [] for n, i in enumerate(footer_xpath): footer =", "df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) 
df[\"Link_name\"].append(text) df[\"URL\"].append(href)", "from selenium.common.exceptions import ElementNotInteractableException import pandas as pd import time class Test_1: base_url", "By from pageObjects.footer import Footer from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig", "import time class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen()", "driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\") finally: df[\"S.no\"].append(n + 1) df[\"Link_name\"].append(text)", "setup): self.logger.info(\"*************** footer links test started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3)", "result.append(\"failed\") else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text))", "as pd import time class Test_1: base_url = siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger", "[], \"URL\": [], \"Directed URL\": [], \"Validation\": [] } result = [] for", "footer.get_attribute(\"text\") try: footer.click() except ElementNotInteractableException as e: self.logger.info(\"**************** {} ************\".format(e)) self.logger.info(\"************ {} footer", "import Footer from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import", "import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas as", "from utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException import", "URL\"].append(current_url) df[\"XPATH\"].append(i) Data = pd.DataFrame(df, 
index=df[\"S.no\"]) output = pd.ExcelWriter(\".//reports/footers_links_validation.xlsx\") Data.to_excel(output) output.save() if \"failed\"", "utilities.customLogger import LogGen from utilities.siteConfig import siteconfig from selenium.common.exceptions import ElementNotInteractableException import pandas", "else: current_url = driver.current_url time.sleep(1) driver.back() df[\"Validation\"].append(\"passed\") self.logger.info(\"************ {} footer passed *********\".format(text)) result.append(\"passed\")", "selenium import webdriver from selenium.webdriver.common.by import By from pageObjects.footer import Footer from utilities.customLogger", "= { \"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed URL\": [],", "= siteconfig.getsiteURl() base_xpath = siteconfig.getfooterXPATH() logger = LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer", "Footer(driver) footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\": [], \"XPATH\":", "time.sleep(3) df = { \"S.no\": [], \"Link_name\": [], \"XPATH\": [], \"URL\": [], \"Directed", "footer_xpath = footers.footer_links_xpath(self.base_xpath) time.sleep(3) df = { \"S.no\": [], \"Link_name\": [], \"XPATH\": [],", "[] for n, i in enumerate(footer_xpath): footer = driver.find_element_by_xpath(i) footer.location_once_scrolled_into_view href = footer.get_attribute(\"href\")", "started **************\") driver = setup driver.get(self.base_url) driver.maximize_window() time.sleep(3) footers = Footer(driver) footer_xpath =", "= LogGen.loggen() def test_footer_links(self, setup): self.logger.info(\"*************** footer links test started **************\") driver =" ]
[ "return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length -", "1) num_connections = min(num_connections, 128) if file_size < 100 * 1024 * 1024:", "return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is", "os import re import shutil import sys import time import urllib import htsget", "unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos),", "if total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if", "can check which datasets your account has access to at \" \"'https://ega-archive.org/my-datasets.php' after", "to this file. \" \"You can check which datasets your account has access", "os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args:", "that the necessary ports are open in your \" \"firewall. 
See the documentation", "num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size} and chunk", "Unable to obtain valid MD5 from the server (received: {check_sum}).\" f\" Can't validate", "file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name self._file_size = size", "print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an", "!= ext and len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder, self.id,", "f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait,", "continue if (file_from, file_length) in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting", "(genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_'", "or '').strip().lower() if format_ext != ext and len(format_ext) > 1: ext += format_ext", "1 logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size} and chunk \" f\"length", "unencrypted_checksum self._file_status = status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user", "file exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128)", "on the file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5", "200 but the JSON payload has # all the fields empty if res['displayFileName']", "os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output 
file:'{ret_val}'\") return ret_val @staticmethod def", "executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size", "name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" +", "num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options", "= res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status =", "HTTP code 200 but the JSON payload has # all the fields empty", "md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size", "options=None, pbar=None): if start_pos < 0: raise ValueError(\"start : must be positive\") if", "not be retrieved. 
\" + \"This is probably because your account does not", "expected md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def", "(this operation can take a long time depending on the file size)\") received_file_md5", "= os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or", "< 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return", "plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file,", "raise ValueError(\"start : must be positive\") if length <= 0: raise ValueError(\"length :", "chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file)", "good md5 in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download", "shutil import sys import time import urllib import htsget import psutil from tqdm", "\"plain\"} file_size -= 16 # 16 bytes IV not necesary in plain mode", "genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext = '.' 
+ (genomic_range_args[4] or", "= 1 logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size} and chunk \"", "else 0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size ==", "load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have access to", "!= self.id: continue if (file_from, file_length) in [(param[1], param[2]) for param in params]:", "file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice", "parameter (' f'and thus the slice sizes) have been modified since the last", "thus the slice sizes) have been modified since the last run.') os.remove(os.path.join(temporary_directory, file))", "pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length},", "os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size", "is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata()", "free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output:", "'{self.id}' could not be retrieved. 
\" + \"This is probably because your account", "GiB\") logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f} GiB\") # If file", "this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output,", "with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size =", "downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32", "* 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted =", "f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else", "retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\" \" Please", "access to the file then the server returns HTTP code 200 but the", "hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used", "raise Exception(f\"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args):", "Exception(f\"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return", "supported.\" \" Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\")", "your account does not have access to this file. 
\" \"You can check", "account has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName']", "data_client self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name", "if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if", "in params]: continue logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter", "display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files =", "file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections, 1)", "self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if", "positive\") if length <= 0: raise ValueError(\"length : must be positive\") path =", "{hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2 **", "params]: continue logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter ('", "r: with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar:", "pbar=None): if start_pos < 0: raise ValueError(\"start : must be positive\") if length", "server returns HTTP code 200 but the JSON payload has # all the", "1: ext += format_ext ret_val = os.path.join(folder, self.id, name + genomic_range + ext)", "from the server (received: {check_sum}).\" f\" Can't validate download. 
Please contact EGA helpdesk", "100 * 1024 * 1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s),", "max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\" \"", "\" Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file", "space : {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free /", "service. Check that the necessary ports are open in your \" \"firewall. See", "for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in", "if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2],", "match.group(3) if file_id != self.id: continue if (file_from, file_length) in [(param[1], param[2]) for", "not None or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name =", "\"This is probably because your account does not have access to this file.", "all the fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise", "logging import logging.handlers import os import re import shutil import sys import time", "params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size ==", "file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5 value", "payload has # all the fields empty if res['displayFileName'] is None or res['unencryptedChecksum']", "== check_sum or not_valid_server_md5: 
DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info(", "** 30):.2f} GiB\") # If file is bigger than free space, warning if", "genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3])", "temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params", "os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return", "# If file is bigger than free space, warning if hdd.free < self.size:", "the file then the server returns HTTP code 200 but the JSON payload", "+ (str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4] or '').strip().lower() if format_ext", "format_ext != ext and len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder,", "logging.handlers import os import re import shutil import sys import time import urllib", "chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received !=", "this file. \" \"You can check which datasets your account has access to", "res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have access to the", "self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is None:", "DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections,", "options is not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name", "', output_file, genomic_range_args) return done = False num_retries = 0 while not done:", "start_pos < 0: raise ValueError(\"start : must be positive\") if length <= 0:", "@property def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self):", "DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self,", "name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file,", "os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2) file_length =", "If file is bigger than free space, warning if hdd.free < self.size: logging.warning(f\"The", "self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def", "f: # save good md5 in aux file for future re-use f.write(received_file_md5.encode()) else:", "have been modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with", "results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this operation", "'_' + (str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4] or '').strip().lower() if", "if e is ConnectionError: logging.info(\"Failed to connect to data service. 
Check that the", "= sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size)", "referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries,", "folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)]", "os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 ** 30):.2f} GiB\")", "self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum", "= res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def", "is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\")", "not have access to this file. 
\" \"You can check which datasets your", "chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True)", "not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum}).\"", "pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar)", "is bigger than free space, warning if hdd.free < self.size: logging.warning(f\"The size of", "= os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod", "time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except", "None: raise RuntimeError(f\"Metadata for file id '{self.id}' could not be retrieved. 
\" +", ": ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5", "logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries +=", "self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is None:", "file is bigger than free space, warning if hdd.free < self.size: logging.warning(f\"The size", "{max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B',", "output_file, check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if file_size", "min(num_connections, 128) if file_size < 100 * 1024 * 1024: num_connections = 1", "the file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 ==", "def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id", "save good md5 in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise", "is None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata()", "{ 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r: with open(file_name,", "= 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024", "'{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory):", 
"return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos < 0:", "chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)]", "def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size check_sum", "self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as e: if e is", "pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match =", "try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete the temporary folder: {ex}')", "with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4],", "is None: self.load_metadata() return self._file_name @property def size(self): if self._file_size is None: self.load_metadata()", "output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5 from the", "is not None or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name", "file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum", "final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size", "your free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as", "'wb') as f: # save good md5 in aux file for future re-use", "the last run.') 
os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for", "> 1: ext += format_ext ret_val = os.path.join(folder, self.id, name + genomic_range +", "{hdd.free / (2 ** 30):.2f} GiB\") # If file is bigger than free", "f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum", "e: if e is ConnectionError: logging.info(\"Failed to connect to data service. Check that", "utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to", "unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files = set() self._display_file_name =", "open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux file for future", "data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id", "positive\") path = f\"/files/{self.id}\" if options is not None: path += '?' 
+", "+= 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as", "a long time depending on the file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying", "f\"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum}).\" f\" Can't", "if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)}", "= [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos", "bigger than your free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file,", "'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r: with open(file_name, 'ba')", "= match.group(3) if file_id != self.id: continue if (file_from, file_length) in [(param[1], param[2])", "/ (2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f}", "status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5):", "pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100", "self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16 # 16", "the documentation for more information.\") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory)", "MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have been modified since the", "None or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name = 
self.display_name", "been modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections)", "= res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name", "IV not necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum:", "error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return", "Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if", "is None: raise RuntimeError(f\"Metadata for file id '{self.id}' could not be retrieved. \"", "end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"):", "file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files = set()", "os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception:", "!= length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name)", "necessary ports are open in your \" \"firewall. 
See the documentation for more", "{\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes IV not necesary in plain", "leftover {file} temporary file because the MAX_SLICE_SIZE parameter (' f'and thus the slice", "return self._file_name @property def size(self): if self._file_size is None: self.load_metadata() return self._file_size @property", "= psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used space", "length, options=None, pbar=None): if start_pos < 0: raise ValueError(\"start : must be positive\")", "@staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is not None", "psutil from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 *", "space, warning if hdd.free < self.size: logging.warning(f\"The size of the file that you", "hdd.free < self.size: logging.warning(f\"The size of the file that you want to download", "to connect to data service. Check that the necessary ports are open in", "return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]},", "temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete the temporary folder:", "size of the file that you want to download is bigger than your", "DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client,", "def size(self): if self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if", "probably because your account does not have access to this file. 
\" \"You", "on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux", "(str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext = '.'", "= res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata()", ": must be positive\") if length <= 0: raise ValueError(\"length : must be", "= \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range =", "= display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status =", "not genomic_range_args: return False return genomic_range_args[0] is not None or genomic_range_args[1] is not", "logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ',", "at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size", "return genomic_range_args[0] is not None or genomic_range_args[1] is not None def generate_output_filename(self, folder,", "** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free", "try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as e: if e", "which datasets your account has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\")", "file_from = match.group(2) file_length = match.group(3) if file_id != self.id: continue if (file_from,", "= max(num_connections, 1) num_connections = min(num_connections, 128) if file_size < 100 * 1024", "not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name", "os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections", "None: path += '?' + urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name +", "pbar.update(existing_size) if existing_size == length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos", "space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get(", "16 # 16 bytes IV not necesary in plain mode if os.path.exists(output_file) and", "(file_from, file_length) in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the leftover", "logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if", "can take a long time depending on the file size)\") received_file_md5 = utils.md5(output_file,", "= final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if", "= max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as", "logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum =", "results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, 
params): results.append(part_file_name)", "logging.info(\"Calculating md5 (this operation can take a long time depending on the file", "and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory,", "** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f} GiB\") #", "@property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self):", "128) if file_size < 100 * 1024 * 1024: num_connections = 1 logging.info(f\"Download", "file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range", "if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is", "is bigger than your free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with", "executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f", "if options is not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice'", "def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is not None or", "download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos < 0: raise ValueError(\"start :", "@property def name(self): if self._file_name is None: self.load_metadata() return self._file_name @property def size(self):", "def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete the", "import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile:", "ports are open in your \" \"firewall. See the documentation for more information.\")", "self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name)", "to download is bigger than your free space in this \" f\"location\") if", "code 200 but the JSON payload has # all the fields empty if", "check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections =", "be positive\") if length <= 0: raise ValueError(\"length : must be positive\") path", "or '') format_ext = '.' 
+ (genomic_range_args[4] or '').strip().lower() if format_ext != ext", "if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1", "than your free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb')", "to : ', output_file, genomic_range_args) return done = False num_retries = 0 while", "if hdd.free < self.size: logging.warning(f\"The size of the file that you want to", "existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if", "in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or", "< 0: raise ValueError(\"start : must be positive\") if length <= 0: raise", "(str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4] or '').strip().lower() if format_ext !=", "output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not", "file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if", "since the last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor:", "max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done = False", "== check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections", "if file_name.endswith(ext_to_remove): file_name = 
file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if", "GiB\") # If file is bigger than free space, warning if hdd.free <", "currently not supported.\" \" Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id:", "len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder, self.id, name + genomic_range", "or '')) != 32 logging.info(\"Calculating md5 (this operation can take a long time", "Check that the necessary ports are open in your \" \"firewall. See the", "check which datasets your account has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging", "self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum", "ValueError(\"length : must be positive\") path = f\"/files/{self.id}\" if options is not None:", "space : {hdd.free / (2 ** 30):.2f} GiB\") # If file is bigger", "the JSON payload has # all the fields empty if res['displayFileName'] is None", "* 1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size}", "download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size check_sum =", "1}'}) as r: with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk)", "returns HTTP code 200 but the JSON payload has # all the fields", "== max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt", "32 logging.info(\"Calculating md5 (this operation can take a long time depending on the", "bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, 
format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args,", "as executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for", "for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 =", "or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable", "has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name", "= \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0')", "size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files = set() self._display_file_name", "os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1])", "have access to this file. 
\" \"You can check which datasets your account", "= f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name)", "file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\"", "logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size} and chunk \" f\"length {max_slice_size}]...\")", "= True except Exception as e: if e is ConnectionError: logging.info(\"Failed to connect", "< self.size: logging.warning(f\"The size of the file that you want to download is", "match.group(1) file_from = match.group(2) file_length = match.group(3) if file_id != self.id: continue if", "None: self.load_metadata() return self._file_name @property def size(self): if self._file_size is None: self.load_metadata() return", "could not be retrieved. 
\" + \"This is probably because your account does", "<= 0: raise ValueError(\"length : must be positive\") path = f\"/files/{self.id}\" if options", "'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received =", "num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently", "self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have access to the file then", "res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self):", "in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id", "f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r: with open(file_name, 'ba') as", "the server returns HTTP code 200 but the JSON payload has # all", "(received: {check_sum}).\" f\" Can't validate download. 
Please contact EGA helpdesk on <EMAIL>\") with", "md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self,", "check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if file_size <", "if (file_from, file_length) in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the", "Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file =", "tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class", "1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted = False", "__init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id =", "= size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\")", "logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\"", "email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir,", "results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or ''))", "return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\")", "as file_out: for chunk in 
r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name)", "md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\"", "set() self._display_file_name = display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum", "the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (' f'and thus the", "logging.info( f\"WARNING: Unable to obtain valid MD5 from the server (received: {check_sum}).\" f\"", "logging.warning(f\"The size of the file that you want to download is bigger than", "open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received", "+= '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '')", "return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property", "with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len,", "= data_client self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name =", "f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1],", "f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'\")", "return 
self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata() return self._file_name @property", "are open in your \" \"firewall. See the documentation for more information.\") logging.exception(e)", "{num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete", "in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2) file_length", "\"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range", "file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -=", "file_size, chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1)", "self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata() return self._file_name", "\" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with", "self._file_name is None: self.load_metadata() return self._file_name @property def size(self): if self._file_size is None:", "min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for", "* 1024 * 1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s), file", "import sys import time import urllib import htsget import psutil from tqdm import", "= unencrypted_checksum self._file_status = status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the", "ext) 
logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes,", "utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections = max(num_connections,", "GiB\") logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space :", "done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as e: if", "your account has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name =", "not necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local", "does not have access to this file. \" \"You can check which datasets", "Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name)", "the user does not have access to the file then the server returns", "value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name,", "DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done = False num_retries = 0", "status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have", "return done = False num_retries = 0 while not done: try: self.download_file(output_file, num_connections,", "MD5 from the server (received: {check_sum}).\" f\" Can't validate download. 
Please contact EGA", "as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options,", "'').strip().lower() if format_ext != ext and len(format_ext) > 1: ext += format_ext ret_val", "\"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize']", "import shutil import sys import time import urllib import htsget import psutil from", "bigger than free space, warning if hdd.free < self.size: logging.warning(f\"The size of the", "documentation for more information.\") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise", "+ ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)}", "start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved", "self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def", "def status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file,", "the necessary ports are open in your \" \"firewall. 
See the documentation for", "necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file", "'.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length:", "os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 ** 30):.2f}", "open in your \" \"firewall. See the documentation for more information.\") logging.exception(e) if", "-= 16 # 16 bytes IV not necesary in plain mode if os.path.exists(output_file)", "raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False", "file because the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have been", "htsget import psutil from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE =", "file_id = match.group(1) file_from = match.group(2) file_length = match.group(3) if file_id != self.id:", "def name(self): if self._file_name is None: self.load_metadata() return self._file_name @property def size(self): if", "in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the leftover {file} temporary", "helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in", "= 0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except", "= match.group(2) file_length = match.group(3) if file_id != self.id: continue if (file_from, file_length)", "logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could", "more information.\") logging.exception(e) if num_retries == max_retries: if 
DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait)", "self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def", "else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done =", "raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try:", "exists:', output_file, check_sum) return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if", "done = True except Exception as e: if e is ConnectionError: logging.info(\"Failed to", "md5 (this operation can take a long time depending on the file size)\")", "= {\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes IV not necesary in", "f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else", "> length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return file_name try:", "if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return", "be positive\") path = f\"/files/{self.id}\" if options is not None: path += '?'", "= False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client =", "total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\")", "length: raise Exception(f\"Slice error: received={total_received}, 
requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise", "output_file, genomic_range_args) return done = False num_retries = 0 while not done: try:", "self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name", "[(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the leftover {file} temporary file", "not supported.\" \" Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size}", "self._display_file_name = display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status", "output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries,", "\" \"firewall. See the documentation for more information.\") logging.exception(e) if num_retries == max_retries:", "format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info(", "open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize", "self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name self._file_size", "id '{self.id}' could not be retrieved. 
\" + \"This is probably because your", "referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE):", "\" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size =", "import logging.handlers import os import re import shutil import sys import time import", "concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f)", "time depending on the file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\")", "/ (2 ** 30):.2f} GiB\") # If file is bigger than free space,", "import logging import logging.handlers import os import re import shutil import sys import", "temporary file because the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have", "has # all the fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is", "got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None,", "@staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]},", "None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id '{self.id}' could not", "check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5 from the server", "param[2]) for param in params]: continue 
logging.warning(f'Deleting the leftover {file} temporary file because", "os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total", "check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes", "return num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if file_size < 100", "for chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\",", "file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual", "chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory):", "not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 **", "num_connections, max_slice_size) done = True except Exception as e: if e is ConnectionError:", "1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex:", "file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'})", "requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod", "modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as", "= 
self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd =", "is not None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\"", "def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if", "file_size -= 16 # 16 bytes IV not necesary in plain mode if", "user does not have access to the file then the server returns HTTP", "e is ConnectionError: logging.info(\"Failed to connect to data service. Check that the necessary", "self.size: logging.warning(f\"The size of the file that you want to download is bigger", "individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size", "= '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range +=", "import concurrent.futures import logging import logging.handlers import os import re import shutil import", "mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum)", "match.group(2) file_length = match.group(3) if file_id != self.id: continue if (file_from, file_length) in", "sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5", "information.\") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries", "end=genomic_range_args[3], data_format=genomic_range_args[4], 
max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to", "import time import urllib import htsget import psutil from tqdm import tqdm from", "with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r:", "If the user does not have access to the file then the server", "def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self): if", "max(num_connections, 1) num_connections = min(num_connections, 128) if file_size < 100 * 1024 *", "display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status", "genomic_range_args) return done = False num_retries = 0 while not done: try: self.download_file(output_file,", "output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum", "self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata() return self._file_status", "file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating md5", "file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client self.id = file_id self.temporary_files", "def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download", "depending on the file size)\") received_file_md5 = utils.md5(output_file, file_size) 
logging.info(\"Verifying file checksum\") if", "self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If", "pbar: pbar.update(existing_size) if existing_size == length: return file_name try: with self.data_client.get_stream(path, { 'Range':", "'{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None):", "is ConnectionError: logging.info(\"Failed to connect to data service. Check that the necessary ports", "os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [", "0: raise ValueError(\"length : must be positive\") path = f\"/files/{self.id}\" if options is", "data service. Check that the necessary ports are open in your \" \"firewall.", "utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this", "'_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext", "sys import time import urllib import htsget import psutil from tqdm import tqdm", "the slice sizes) have been modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results", "res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def", "name(self): if self._file_name is None: self.load_metadata() return self._file_name @property def size(self): if self._file_size", "gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, 
format={gr_args[4]})\" ) def download_file_retry(self,", "datasets your account has access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name", "self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as r: with", "import re import shutil import sys import time import urllib import htsget import", "checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum)", "if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5 from the server (received:", "(' f'and thus the slice sizes) have been modified since the last run.')", "to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid", "or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' +", "DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain", "self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size,", "max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options =", "EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5", "def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name", "= file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name = file_name self._file_size =", ") def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG", "* 1024 
temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None,", "= os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params =", "existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return file_name", "operation can take a long time depending on the file size)\") received_file_md5 =", "def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos < 0: raise ValueError(\"start", "the file that you want to download is bigger than your free space", "num_retries = 0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True", "your \" \"firewall. See the documentation for more information.\") logging.exception(e) if num_retries ==", "'?' + urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size", "attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not", "# 16 bytes IV not necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file,", "file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2)", "data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to :", "size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res = 
self.data_client.get_json(f\"/metadata/files/{self.id}\") #", "file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0", "def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files", "0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception", "res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus']", "logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free", "genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or", "results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size:", "the fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata", "os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self,", "temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client", "Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save", "sizes) have been modified since the last run.') os.remove(os.path.join(temporary_directory, file)) results = []", "from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024", "bytes IV not necesary in plain mode if 
os.path.exists(output_file) and utils.md5(output_file, file_size) ==", "path += '?' + urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp'", "file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext", "raise RuntimeError(f\"Metadata for file id '{self.id}' could not be retrieved. \" + \"This", "self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status", "os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return", "self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes,", "if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if", "+ (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext =", "ret_val = os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val", "max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\" \" Please email", "psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used space :", "while not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as", "+ (genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext) > 1: ext", "slice sizes) have been modified since the last run.') 
os.remove(os.path.join(temporary_directory, file)) results =", "num_connections = max(num_connections, 1) num_connections = min(num_connections, 128) if file_size < 100 *", "res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata() return", "self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None:", "{hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2 **", "re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2) file_length = match.group(3) if file_id", "the server (received: {check_sum}).\" f\" Can't validate download. Please contact EGA helpdesk on", "last run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name", "DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self,", "final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if", "connect to data service. 
Check that the necessary ports are open in your", "# all the fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None:", "in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum']", "<EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux file", "None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata() return", "from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE =", "as f: # save good md5 in aux file for future re-use f.write(received_file_md5.encode())", "done = False num_retries = 0 while not done: try: self.download_file(output_file, num_connections, max_slice_size)", "size(self): if self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum", "in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\",", "status=None): self.data_client = data_client self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name", "1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s), file size {file_size} and", "{file} temporary file because the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes)", "file id '{self.id}' could not be retrieved. 
\" + \"This is probably because", "download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are", "total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name):", "* 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None,", "take a long time depending on the file size)\") received_file_md5 = utils.md5(output_file, file_size)", "concurrent.futures import logging import logging.handlers import os import re import shutil import sys", "or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id '{self.id}' could not be", "not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this operation can take", "= min(num_connections, 128) if file_size < 100 * 1024 * 1024: num_connections =", "size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or", "path = f\"/files/{self.id}\" if options is not None: path += '?' + urllib.parse.urlencode(options)", "obtain valid MD5 from the server (received: {check_sum}).\" f\" Can't validate download. 
Please", "+ existing_size}-{start_pos + length - 1}'}) as r: with open(file_name, 'ba') as file_out:", "self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata() return self._file_name @property def", "params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for", "os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_,", "'' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_'", "temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space", "as e: if e is ConnectionError: logging.info(\"Failed to connect to data service. Check", "raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name,", "for more information.\") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e", "must be positive\") if length <= 0: raise ValueError(\"length : must be positive\")", "ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range", "{check_sum}).\" f\" Can't validate download. 
Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file),", "options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match", "== length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos +", "file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to :", "@property def size(self): if self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self):", "+= '_' + (str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4] or '').strip().lower()", "+= format_ext ret_val = os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\")", "if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total / (2", "is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata()", "because the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have been modified", "[] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size", "downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this operation can", "os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size)", "existing_size}-{start_pos + length - 1}'}) as r: with open(file_name, 'ba') as file_out: for", "= len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this operation 
can take a", "False return genomic_range_args[0] is not None or genomic_range_args[1] is not None def generate_output_filename(self,", "'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if", "is not None: path += '?' + urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name =", "if not genomic_range_args: return False return genomic_range_args[0] is not None or genomic_range_args[1] is", "to data service. Check that the necessary ports are open in your \"", "logging.info(\"Failed to connect to data service. Check that the necessary ports are open", "size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file),", "genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove =", "args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos <", "server (received: {check_sum}).\" f\" Can't validate download. Please contact EGA helpdesk on <EMAIL>\")", "re import shutil import sys import time import urllib import htsget import psutil", "file_length = match.group(3) if file_id != self.id: continue if (file_from, file_length) in [(param[1],", "chunk_len)] for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from", "download. 
Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: #", "received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5:", "length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length", "num_connections = min(num_connections, 128) if file_size < 100 * 1024 * 1024: num_connections", "self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos < 0: raise", "time import urllib import htsget import psutil from tqdm import tqdm from pyega3.libs", "is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is not None or genomic_range_args[1]", "valid MD5 from the server (received: {check_sum}).\" f\" Can't validate download. Please contact", "if self._file_size is None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is", "\" \"You can check which datasets your account has access to at \"", "@property def status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str,", "file_id != self.id: continue if (file_from, file_length) in [(param[1], param[2]) for param in", "None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is None: self.load_metadata() return", "bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size =", ": must be positive\") path = f\"/files/{self.id}\" if options is not None: path", "length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return file_name try: with", "as output: htsget.get( 
f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries", "if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def", "free space, warning if hdd.free < self.size: logging.warning(f\"The size of the file that", "res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property def display_name(self): if self._display_file_name is", "RuntimeError(f\"Metadata for file id '{self.id}' could not be retrieved. \" + \"This is", "the MAX_SLICE_SIZE parameter (' f'and thus the slice sizes) have been modified since", "if format_ext != ext and len(format_ext) > 1: ext += format_ext ret_val =", "def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have access", "+ '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size >", "file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res", "None: self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def", "self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\" \" Please email EGA Helpdesk", "DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 *", "= os.stat(file_name).st_size if os.path.exists(file_name) else 0 if existing_size > length: os.remove(file_name) if pbar:", "if 
self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status is", "file that you want to download is bigger than your free space in", "0 if existing_size > length: os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length:", "length - 1}'}) as r: with open(file_name, 'ba') as file_out: for chunk in", "None: self.load_metadata() return self._file_size @property def unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return", "'0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext = '.' + (genomic_range_args[4]", "start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir, genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if", "Exception as e: if e is ConnectionError: logging.info(\"Failed to connect to data service.", "continue logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (' f'and", "delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError as ex: logging.error(f'Could not delete the temporary", "utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024", "import urllib import htsget import psutil from tqdm import tqdm from pyega3.libs import", "/ (2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f}", "file then the server returns HTTP code 200 but the JSON payload has", "= f\"/files/{self.id}\" if options is not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name", "ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]},", "final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0]", "options = {\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes IV not necesary", "genomic_range_args, max_retries, retry_wait, max_slice_size=DEFAULT_SLICE_SIZE): if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\"", "if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return", "is probably because your account does not have access to this file. \"", "self.load_metadata() return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self,", "ValueError(\"start : must be positive\") if length <= 0: raise ValueError(\"length : must", "logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE parameter (' f'and thus", "Can't validate download. 
Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as", "if existing_size == length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos +", "generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name =", "See the documentation for more information.\") logging.exception(e) if num_retries == max_retries: if DataFile.temporary_files_should_be_deleted:", "account does not have access to this file. \" \"You can check which", "< 100 * 1024 * 1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections}", "[using {num_connections} connection(s), file size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len =", "{num_connections} connection(s), file size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size", "max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar:", "or '0') genomic_range += '_' + (str(genomic_range_args[3]) or '') format_ext = '.' 
+", "self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory):", "Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory", "to the file then the server returns HTTP code 200 but the JSON", "(os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0,", "max_slice_size) done = True except Exception as e: if e is ConnectionError: logging.info(\"Failed", "DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3],", "of the file that you want to download is bigger than your free", "return False return genomic_range_args[0] is not None or genomic_range_args[1] is not None def", "\"You can check which datasets your account has access to at \" \"'https://ega-archive.org/my-datasets.php'", "for file in os.listdir(temporary_directory): match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from =", "file. 
\" \"You can check which datasets your account has access to at", "return self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file,", "try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos + length - 1}'}) as", "max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ',", "reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait,", "[ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size - chunk_start_pos), options, pbar) for chunk_start_pos in", "output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd", "if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not", "DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2])", ": {hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2", "f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections, output_dir,", "= False num_retries = 
0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done", "genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range", "= self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not have access to the file", "= set() self._display_file_name = display_file_name self._file_name = file_name self._file_size = size self._unencrypted_checksum =", "download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if start_pos", "file_name, start_pos, length, options=None, pbar=None): if start_pos < 0: raise ValueError(\"start : must", "(genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext) > 1: ext +=", "class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted = False def", "file_size < 100 * 1024 * 1024: num_connections = 1 logging.info(f\"Download starting [using", "+ urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size =", "received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or not_valid_server_md5:", "unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size -", "with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk))", "= file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range =", "file='{file_name}'\") except Exception: if 
os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def", ": ', output_file, genomic_range_args) return done = False num_retries = 0 while not", "future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}' but", "'.' + (genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext) > 1:", "\" f\"location\") if DataFile.is_genomic_range(genomic_range_args): with open(output_file, 'wb') as output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0],", "contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f: # save good", "!= 32 logging.info(\"Calculating md5 (this operation can take a long time depending on", "fields empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for", "for param in params]: continue logging.warning(f'Deleting the leftover {file} temporary file because the", "genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total", "as r: with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if", "bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory)", "tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE", "= [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in 
executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close()", "# save good md5 in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file)", "'')) != 32 logging.info(\"Calculating md5 (this operation can take a long time depending", "0: raise ValueError(\"start : must be positive\") if length <= 0: raise ValueError(\"length", "= '.' + (genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext) >", "unencrypted_checksum(self): if self._unencrypted_checksum is None: self.load_metadata() return self._unencrypted_checksum @property def status(self): if self._file_status", "not have access to the file then the server returns HTTP code 200", "match = re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2) file_length = match.group(3)", "not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to", "existing_size == length: return file_name try: with self.data_client.get_stream(path, { 'Range': f'bytes={start_pos + existing_size}-{start_pos", "+ genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args):", "print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" )", "e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory)", "def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length, options=None, pbar=None): if", "= 
self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext =", "self._file_name @property def size(self): if self._file_size is None: self.load_metadata() return self._file_size @property def", "file_size = self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16", "format_ext ret_val = os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return", "display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name", "= res['fileStatus'] @property def display_name(self): if self._display_file_name is None: self.load_metadata() return self._display_file_name @property", "except Exception as e: if e is ConnectionError: logging.info(\"Failed to connect to data", "= file_name self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self):", "retrieved. 
\" + \"This is probably because your account does not have access", "file_size - chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file", "import os import re import shutil import sys import time import urllib import", "None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove):", "in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected", "{file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\")", "length <= 0: raise ValueError(\"length : must be positive\") path = f\"/files/{self.id}\" if", "1024 * 1024: num_connections = 1 logging.info(f\"Download starting [using {num_connections} connection(s), file size", "+= '?' + urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name)", "genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range += '_' + (str(genomic_range_args[3]) or", "at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory =", "f'and thus the slice sizes) have been modified since the last run.') os.remove(os.path.join(temporary_directory,", "validate download. 
Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb') as f:", "file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args):", "file checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file,", "tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos, min(chunk_len, file_size", "res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id '{self.id}'", "empty if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file", "with open(utils.get_fname_md5(output_file), 'wb') as f: # save good md5 in aux file for", "file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\"", "(2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f} GiB\")", "if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' +", "max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry attempt {num_retries}\")", "does not have access to the file then the server returns HTTP code", "if file_size < 100 * 1024 * 1024: num_connections = 1 logging.info(f\"Download starting", "ConnectionError: logging.info(\"Failed to connect to data service. 
Check that the necessary ports are", "self._display_file_name is None: self.load_metadata() return self._display_file_name @property def name(self): if self._file_name is None:", "+ length - 1}'}) as r: with open(file_name, 'ba') as file_out: for chunk", "and len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder, self.id, name +", "self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd())", "and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:', output_file, check_sum) return num_connections =", "+ (genomic_range_args[0] or genomic_range_args[1]) genomic_range += '_' + (str(genomic_range_args[2]) or '0') genomic_range +=", "file) file_id = match.group(1) file_from = match.group(2) file_length = match.group(3) if file_id !=", "if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) !=", "want to download is bigger than your free space in this \" f\"location\")", "urllib.parse.urlencode(options) final_file_name = f'{file_name}-from-{str(start_pos)}-len-{str(length)}.slice' file_name = final_file_name + '.tmp' self.temporary_files.add(file_name) existing_size = os.stat(file_name).st_size", "download is bigger than your free space in this \" f\"location\") if DataFile.is_genomic_range(genomic_range_args):", "self.data_client = data_client self.id = file_id self.temporary_files = set() self._display_file_name = display_file_name self._file_name", "htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], 
max_retries=sys.maxsize if max_retries < 0", "True except Exception as e: if e is ConnectionError: logging.info(\"Failed to connect to", "for file id '{self.id}' could not be retrieved. \" + \"This is probably", "file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in executor.map(self.download_file_slice_, params):", "part_file_name in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results)", "for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}'", "if pbar: pbar.update(existing_size) if existing_size == length: return file_name try: with self.data_client.get_stream(path, {", ": {hdd.free / (2 ** 30):.2f} GiB\") # If file is bigger than", "output: htsget.get( f\"{self.data_client.htsget_url}/files/{self.id}\", output, reference_name=genomic_range_args[0], reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries <", "self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum = res['unencryptedChecksum'] self._file_status = res['fileStatus'] @property", "30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f} GiB\") # If", "but the JSON payload has # all the fields empty if res['displayFileName'] is", "is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id '{self.id}' could", "genomic_range_args[0] is not None or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args):", "not None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if", "0 else max_retries, retry_wait=retry_wait, 
bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done", ": {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2", "ext += format_ext ret_val = os.path.join(folder, self.id, name + genomic_range + ext) logging.debug(f\"Output", "an individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"}", "start_pos, length, options=None, pbar=None): if start_pos < 0: raise ValueError(\"start : must be", "os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id),", "genomic_range_args: return False return genomic_range_args[0] is not None or genomic_range_args[1] is not None", "res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id '{self.id}' could not be retrieved.", "connection(s), file size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory", "access to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name =", "for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received", "import htsget import psutil from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE", "<EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file),", "logging.info( \"GPG files are currently not supported.\" \" Please email EGA Helpdesk at", "\"GPG files are currently not supported.\" \" Please email EGA Helpdesk at <EMAIL>\")", "starting [using 
{num_connections} connection(s), file size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len", "100 * 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None,", "- chunk_start_pos), options, pbar) for chunk_start_pos in range(0, file_size, chunk_len)] for file in", "you want to download is bigger than your free space in this \"", "if res['displayFileName'] is None or res['unencryptedChecksum'] is None: raise RuntimeError(f\"Metadata for file id", "\".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory,", "after logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName'] self._file_size = res['fileSize'] self._unencrypted_checksum", "genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info(", "def print_local_file_info_genomic_range(prefix_str, file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\"", "= 100 * 1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id,", "exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True) as pbar: params = [ (os.path.join(temporary_directory, self.id), chunk_start_pos,", "import psutil from tqdm import tqdm from pyega3.libs import utils DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32", "logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE): \"\"\"Download an individual file\"\"\" file_size", "logging.info(f\"Total space : {hdd.total 
/ (2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used", "if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file,", "Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args) temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") if not", "= self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes IV", "have access to the file then the server returns HTTP code 200 but", "in your \" \"firewall. See the documentation for more information.\") logging.exception(e) if num_retries", "= match.group(1) file_from = match.group(2) file_length = match.group(3) if file_id != self.id: continue", "must be positive\") path = f\"/files/{self.id}\" if options is not None: path +=", "num_retries += 1 logging.info(f\"retry attempt {num_retries}\") def delete_temporary_folder(self, temporary_directory): try: shutil.rmtree(temporary_directory) except FileNotFoundError", "self.id: continue if (file_from, file_length) in [(param[1], param[2]) for param in params]: continue", "\"firewall. 
See the documentation for more information.\") logging.exception(e) if num_retries == max_retries: if", "param in params]: continue logging.warning(f'Deleting the leftover {file} temporary file because the MAX_SLICE_SIZE", "check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved to : ', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING:", "self.load_metadata() return self._file_name @property def size(self): if self._file_size is None: self.load_metadata() return self._file_size", "logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def download_file_retry(self, num_connections,", "chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size), unit='B', unit_scale=True)", "are currently not supported.\" \" Please email EGA Helpdesk at <EMAIL>\") return logging.info(f\"File", "if self.name.endswith(\".gpg\"): logging.info( \"GPG files are currently not supported.\" \" Please email EGA", "JSON payload has # all the fields empty if res['displayFileName'] is None or", "\" + \"This is probably because your account does not have access to", "genomic_range_args): file_name = self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name,", "urllib import htsget import psutil from tqdm import tqdm from pyega3.libs import utils", "f\" Can't validate download. 
Please contact EGA helpdesk on <EMAIL>\") with open(utils.get_fname_md5(output_file), 'wb')", "', output_file, check_sum) if not_valid_server_md5: logging.info( f\"WARNING: Unable to obtain valid MD5 from", "+ \"This is probably because your account does not have access to this", "process expected md5 value '{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args)", "= self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16 #", "= re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id = match.group(1) file_from = match.group(2) file_length = match.group(3) if", "reference_md5=genomic_range_args[1], start=genomic_range_args[2], end=genomic_range_args[3], data_format=genomic_range_args[4], max_retries=sys.maxsize if max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token)", "- 1}'}) as r: with open(file_name, 'ba') as file_out: for chunk in r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE):", "retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done = False num_retries", "\".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name)) genomic_range = ''", "pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error: received={total_received},", "if length <= 0: raise ValueError(\"length : must be positive\") path = f\"/files/{self.id}\"", "file_length) in [(param[1], param[2]) for param in params]: continue logging.warning(f'Deleting the leftover {file}", "space : {hdd.total / (2 ** 30):.2f} GiB\") logging.info(f\"Used space : {hdd.used /", "range(0, file_size, chunk_len)] for file in os.listdir(temporary_directory): match = 
re.match(r\"(.*)-from-(\\d*)-len-(\\d*).*\", file) file_id =", "self._file_size = size self._unencrypted_checksum = unencrypted_checksum self._file_status = status def load_metadata(self): res =", "warning if hdd.free < self.size: logging.warning(f\"The size of the file that you want", "self.display_name ext_to_remove = \".cip\" if file_name.endswith(ext_to_remove): file_name = file_name[:-len(ext_to_remove)] name, ext = os.path.splitext(os.path.basename(file_name))", "file size {file_size} and chunk \" f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory =", "aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5", "self.id, name + genomic_range + ext) logging.debug(f\"Output file:'{ret_val}'\") return ret_val @staticmethod def print_local_file_info_genomic_range(prefix_str,", "md5 in aux file for future re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process", "files are currently not supported.\" \" Please email EGA Helpdesk at <EMAIL>\") return", "False num_retries = 0 while not done: try: self.download_file(output_file, num_connections, max_slice_size) done =", "= utils.md5(output_file, file_size) logging.info(\"Verifying file checksum\") if received_file_md5 == check_sum or not_valid_server_md5: DataFile.print_local_file_info('Saved", "if self._file_name is None: self.load_metadata() return self._file_name @property def size(self): if self._file_size is", "(2 ** 30):.2f} GiB\") # If file is bigger than free space, warning", "raise ValueError(\"length : must be positive\") path = f\"/files/{self.id}\" if options is not", "30):.2f} GiB\") logging.info(f\"Used space : {hdd.used / (2 ** 30):.2f} GiB\") logging.info(f\"Free space", "self._file_status = status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does", "in 
r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length:", "access to this file. \" \"You can check which datasets your account has", "ext = os.path.splitext(os.path.basename(file_name)) genomic_range = '' if DataFile.is_genomic_range(genomic_range_args): genomic_range = \"_genomic_range_\" + (genomic_range_args[0]", "file, gr_args): logging.info( f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, referenceName={gr_args[0]},\" f\" referenceMD5={gr_args[1]}, start={gr_args[2]}, end={gr_args[3]}, format={gr_args[4]})\" ) def", "ext and len(format_ext) > 1: ext += format_ext ret_val = os.path.join(folder, self.id, name", "len(str(check_sum or '')) != 32 logging.info(\"Calculating md5 (this operation can take a long", "= os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error: received={total_received}, requested={length}, file='{file_name}'\") except", "'') format_ext = '.' 
+ (genomic_range_args[4] or '').strip().lower() if format_ext != ext and", "to obtain valid MD5 from the server (received: {check_sum}).\" f\" Can't validate download.", "num_retries == max_retries: if DataFile.temporary_files_should_be_deleted: self.delete_temporary_folder(temporary_directory) raise e time.sleep(retry_wait) num_retries += 1 logging.info(f\"retry", "if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise Exception(f\"Slice error:", "final_file_name @staticmethod def is_genomic_range(genomic_range_args): if not genomic_range_args: return False return genomic_range_args[0] is not", "EGA Helpdesk at <EMAIL>\") return logging.info(f\"File Id: '{self.id}'({self.size} bytes).\") output_file = self.generate_output_filename(output_dir, genomic_range_args)", "to at \" \"'https://ega-archive.org/my-datasets.php' after logging in.\") self._display_file_name = res['displayFileName'] self._file_name = res['fileName']", "be retrieved. \" + \"This is probably because your account does not have", "'{check_sum}' but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos,", "not done: try: self.download_file(output_file, num_connections, max_slice_size) done = True except Exception as e:", "# If the user does not have access to the file then the", "\".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space : {hdd.total /", "format_ext = '.' 
+ (genomic_range_args[4] or '').strip().lower() if format_ext != ext and len(format_ext)", "= os.path.join(os.path.dirname(output_file), \".tmp_download\") if not os.path.exists(temporary_directory): os.makedirs(temporary_directory) hdd = psutil.disk_usage(os.getcwd()) logging.info(f\"Total space :", "(2 ** 30):.2f} GiB\") logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f} GiB\")", "in executor.map(self.download_file_slice_, params): results.append(part_file_name) pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if", "if start_pos < 0: raise ValueError(\"start : must be positive\") if length <=", "16 bytes IV not necesary in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size)", "@staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1, max_slice_size=DEFAULT_SLICE_SIZE):", "if file_id != self.id: continue if (file_from, file_length) in [(param[1], param[2]) for param", "r.iter_content(DOWNLOAD_FILE_MEMORY_BUFFER_SIZE): file_out.write(chunk) if pbar: pbar.update(len(chunk)) total_received = os.path.getsize(file_name) if total_received != length: raise", "that you want to download is bigger than your free space in this", "run.') os.remove(os.path.join(temporary_directory, file)) results = [] with concurrent.futures.ThreadPoolExecutor(max_workers=num_connections) as executor: for part_file_name in", "downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file, results,", "pbar.close() downloaded_file_total_size = sum(os.path.getsize(f) for f in results) if downloaded_file_total_size == file_size: utils.merge_bin_files_on_disk(output_file,", "30):.2f} GiB\") # If file is bigger than free space, warning if hdd.free", "import utils 
DOWNLOAD_FILE_MEMORY_BUFFER_SIZE = 32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 *", "then the server returns HTTP code 200 but the JSON payload has #", "== file_size: utils.merge_bin_files_on_disk(output_file, results, downloaded_file_total_size) not_valid_server_md5 = len(str(check_sum or '')) != 32 logging.info(\"Calculating", "os.remove(file_name) if pbar: pbar.update(existing_size) if existing_size == length: return file_name try: with self.data_client.get_stream(path,", "else: os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}' but got '{received_file_md5}'\") def", "bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args) return done = False num_retries =", "except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name @staticmethod def is_genomic_range(genomic_range_args):", "or genomic_range_args[1] is not None def generate_output_filename(self, folder, genomic_range_args): file_name = self.display_name ext_to_remove", "logging.info(f\"Free space : {hdd.free / (2 ** 30):.2f} GiB\") # If file is", "= status def load_metadata(self): res = self.data_client.get_json(f\"/metadata/files/{self.id}\") # If the user does not", "because your account does not have access to this file. 
\" \"You can", "long time depending on the file size)\") received_file_md5 = utils.md5(output_file, file_size) logging.info(\"Verifying file", "than free space, warning if hdd.free < self.size: logging.warning(f\"The size of the file", "max_retries < 0 else max_retries, retry_wait=retry_wait, bearer_token=self.data_client.auth_client.token) DataFile.print_local_file_info_genomic_range('Saved to : ', output_file, genomic_range_args)", "1024 * 1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None,", "return self._unencrypted_checksum @property def status(self): if self._file_status is None: self.load_metadata() return self._file_status @staticmethod", "1024 temporary_files_should_be_deleted = False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None):", "self._file_status @staticmethod def print_local_file_info(prefix_str, file, md5): logging.info(f\"{prefix_str}'{os.path.abspath(file)}'({os.path.getsize(file)} bytes, md5={md5})\") def download_file(self, output_file, num_connections=1,", "\"\"\"Download an individual file\"\"\" file_size = self.size check_sum = self.unencrypted_checksum options = {\"destinationFormat\":", "f\"/files/{self.id}\" if options is not None: path += '?' 
+ urllib.parse.urlencode(options) final_file_name =", "re-use f.write(received_file_md5.encode()) else: os.remove(output_file) raise Exception(f\"Download process expected md5 value '{check_sum}' but got", "self.unencrypted_checksum options = {\"destinationFormat\": \"plain\"} file_size -= 16 # 16 bytes IV not", "32 * 1024 class DataFile: DEFAULT_SLICE_SIZE = 100 * 1024 * 1024 temporary_files_should_be_deleted", "False def __init__(self, data_client, file_id, display_file_name=None, file_name=None, size=None, unencrypted_checksum=None, status=None): self.data_client = data_client", "received={total_received}, requested={length}, file='{file_name}'\") except Exception: if os.path.exists(file_name): os.remove(file_name) raise os.rename(file_name, final_file_name) return final_file_name", "but got '{received_file_md5}'\") def download_file_slice_(self, args): return self.download_file_slice(*args) def download_file_slice(self, file_name, start_pos, length,", "in plain mode if os.path.exists(output_file) and utils.md5(output_file, file_size) == check_sum: DataFile.print_local_file_info('Local file exists:',", "f\"length {max_slice_size}]...\") chunk_len = max_slice_size temporary_directory = os.path.join(os.path.dirname(output_file), \".tmp_download\") os.makedirs(temporary_directory, exist_ok=True) with tqdm(total=int(file_size)," ]
[ "<filename>bot/utils/__init__.py from . import db_api from . import misc from .notify_admins import on_startup_notify" ]
[ "sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h, m, s = sub.split(':') return", "3600) / 60) s = time % 60 return str(h).zfill(2) + ':' +", "':' + str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def", "+= delta if time < 0: time = 0 time = convert_to_string(time) self.view.replace(edit,", "sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h, m, s", "convert_to_time(self.view.substr(sub)) time += delta if time < 0: time = 0 time =", "find_subtitles(view): subs = [] sel = view.sel() for match in view.find_all(SUB_RE): if sel.contains(match):", "convert_to_string(time): h = int(time / 3600) m = int((time % 3600) / 60)", "3600) m = int((time % 3600) / 60) s = time % 60", "time % 60 return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + (\"%.3f\"", "import sublime SUB_RE = '\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel = view.sel()", "def run(self, edit, delta): subs = find_subtitles(self.view) for sub in subs: # self.view.sel():", "+ float(s.replace(',', '.')) def convert_to_string(time): h = int(time / 3600) m = int((time", "def convert_to_time(sub): h, m, s = sub.split(':') return int(h) * 3600 + int(m)", "s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view) for sub", "int((time % 3600) / 60) s = time % 60 return str(h).zfill(2) +", "/ 3600) m = int((time % 3600) / 60) s = time %", "if sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h, m,", "int(time / 3600) m = int((time % 3600) / 60) s = time", "time += delta if time < 0: time = 0 time = convert_to_string(time)", "+ ':' + str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand):", "SUB_RE = 
'\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel = view.sel() for match", "edit, delta): subs = find_subtitles(self.view) for sub in subs: # self.view.sel(): time =", "= [] sel = view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) #", "delta if time < 0: time = 0 time = convert_to_string(time) self.view.replace(edit, sub,", "subs def convert_to_time(sub): h, m, s = sub.split(':') return int(h) * 3600 +", "60 + float(s.replace(',', '.')) def convert_to_string(time): h = int(time / 3600) m =", "return int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time):", "+ ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta):", "[] sel = view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear()", "h = int(time / 3600) m = int((time % 3600) / 60) s", "int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time): h = int(time / 3600)", "60) s = time % 60 return str(h).zfill(2) + ':' + str(m).zfill(2) +", "import sublime_plugin import sublime SUB_RE = '\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel", "sel = view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear() #", "= convert_to_time(self.view.substr(sub)) time += delta if time < 0: time = 0 time", "if time < 0: time = 0 time = convert_to_string(time) self.view.replace(edit, sub, time)", "match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs) return subs def", "def find_subtitles(view): subs = [] sel = view.sel() for match in view.find_all(SUB_RE): if", "sub.split(':') return int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.')) def", "# sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h, m, s = sub.split(':')", "(\"%.3f\" % s).zfill(6).replace('.', ',') class 
SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view)", "subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if time < 0:", "# sel.add_all(subs) return subs def convert_to_time(sub): h, m, s = sub.split(':') return int(h)", "subs = find_subtitles(self.view) for sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time", "'.')) def convert_to_string(time): h = int(time / 3600) m = int((time % 3600)", "# self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if time < 0: time", "return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',')", "* 60 + float(s.replace(',', '.')) def convert_to_string(time): h = int(time / 3600) m", "sublime SUB_RE = '\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel = view.sel() for", "sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if time", "= int((time % 3600) / 60) s = time % 60 return str(h).zfill(2)", "* 3600 + int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time): h =", "3600 + int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time): h = int(time", "int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time): h", "= view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs)", "for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs) return subs", "str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class", "self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if time < 0: time =", "class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view) for sub in subs:", "= time % 60 return str(h).zfill(2) + ':' + 
str(m).zfill(2) + ':' +", "= find_subtitles(self.view) for sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time +=", "s = time % 60 return str(h).zfill(2) + ':' + str(m).zfill(2) + ':'", "SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view) for sub in subs: #", "subs.append(match) # sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h, m, s =", "convert_to_time(sub): h, m, s = sub.split(':') return int(h) * 3600 + int(m) *", "in view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub):", "subs = [] sel = view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): subs.append(match)", "delta): subs = find_subtitles(self.view) for sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub))", "sublime_plugin import sublime SUB_RE = '\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel =", "sel.add_all(subs) return subs def convert_to_time(sub): h, m, s = sub.split(':') return int(h) *", "return subs def convert_to_time(sub): h, m, s = sub.split(':') return int(h) * 3600", "find_subtitles(self.view) for sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta", "def convert_to_string(time): h = int(time / 3600) m = int((time % 3600) /", "str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit,", "',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view) for sub in", "h, m, s = sub.split(':') return int(h) * 3600 + int(m) * 60", "60 return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.',", "/ 60) s = time % 60 return str(h).zfill(2) + ':' + str(m).zfill(2)", "view.find_all(SUB_RE): if sel.contains(match): subs.append(match) # 
sel.clear() # sel.add_all(subs) return subs def convert_to_time(sub): h,", "+ str(m).zfill(2) + ':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self,", "':' + (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs", "% s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs = find_subtitles(self.view) for", "in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if time <", "float(s.replace(',', '.')) def convert_to_string(time): h = int(time / 3600) m = int((time %", "m = int((time % 3600) / 60) s = time % 60 return", "m, s = sub.split(':') return int(h) * 3600 + int(m) * 60 +", "+ (\"%.3f\" % s).zfill(6).replace('.', ',') class SubtitleSyncCommand(sublime_plugin.TextCommand): def run(self, edit, delta): subs =", "= int(time / 3600) m = int((time % 3600) / 60) s =", "+ int(m) * 60 + float(s.replace(',', '.')) def convert_to_string(time): h = int(time /", "% 60 return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + (\"%.3f\" %", "run(self, edit, delta): subs = find_subtitles(self.view) for sub in subs: # self.view.sel(): time", "time = convert_to_time(self.view.substr(sub)) time += delta if time < 0: time = 0", "'\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel = view.sel() for match in view.find_all(SUB_RE):", "s = sub.split(':') return int(h) * 3600 + int(m) * 60 + float(s.replace(',',", "= '\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d' def find_subtitles(view): subs = [] sel = view.sel() for match in", "for sub in subs: # self.view.sel(): time = convert_to_time(self.view.substr(sub)) time += delta if", "= sub.split(':') return int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.'))", "% 3600) / 60) s = time % 60 return str(h).zfill(2) + ':'", "view.sel() for match in view.find_all(SUB_RE): if sel.contains(match): 
subs.append(match) # sel.clear() # sel.add_all(subs) return" ]
[ "result.add(part) else: # Or a range in the form '1-3'. try: l, r", "the directory PATH, and all intermediate-level directories needed to contain it, unless it", "None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative", "parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces a list of strings.\"\"\" steps =", "or 1 and the default case will correctly # coerce it. elif value[0]", "produced from the previous step, or an iterator # that feeds from a", "ValueError: # Expect to catch both # \"ValueError: too many values to unpack\"", "step0 if data is None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result)", "a range in the form '1-3'. try: l, r = part.split('-') result.update(str(s) for", "parse_options(arglist): import optparse usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\",", "p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter option: %r\" % p) if", "or an iterator # that feeds from a file. def run_step0(data): from steps", "that # use \"print\" to generate their output. logfile = sys.stdout def log(msg):", "3 that reads (copies) the output file created by the Sordinary Step 3.", "None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) #", "in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit = \"STEPS %s", "data produced by Step 3 without re-running it.\"\"\" if data: raise Fatal(\"Expect to", "str(cannot)) # Create a message for stdout. 
if len(step_list) == 1: logit =", "parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save", "be changing the value of sys.stdout before calling other modules that # use", "it; in steps 2 and 5 # we'll be changing the value of", "data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import", "the command line and update the parameters module.\"\"\" if not parm: return import", "if data is None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def", "time.time() cannot = [s for s in step_list if s not in step_fn]", "of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\")", "'(' and value[-1] == ')': value = value[1:-1] value = [int(x) for x", "= gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step", "1 and the default case will correctly # coerce it. elif value[0] ==", "None for step in step_list: data = step_fn[step](data) # Consume the data in", "needed to contain it, unless it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating", "in the work sub-directory\") options, args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected", "procedure must be run from the root \" \"directory of the project.\\nPlease change", "None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps", "Print this text. 
--steps=STEPS Specify which steps to run, as a comma-separated list", "can log to it; in steps 2 and 5 # we'll be changing", "run_step3c(data): \"\"\"An alternative to Step 3 that reads (copies) the output file created", "= step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3 that reads", "contain it, unless it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\"", "gio.step4_output(result) def run_step5(data): from steps import step5 # Step 5 takes a land", "previous step, or an iterator # that feeds from a file. def run_step0(data):", "def run_step1(data): from steps import step1 from extension import step1 as estep1 if", "gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option.", "type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter", "split # produces too many values (\"1-3-\"), and # \"ValueError: invalid literal for", "fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if argv", "Consume the data in whatever the last step was, in order to #", "step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5 # Step 5 takes", "parm: return import parameters for p in parm: try: key, value = p.split('=',", "below takes a data object, its input, # and produces a data object,", "unknown parameter %r\" % key) # Coerce value, a string, to the same", "is all handled in # the step5_input() function. data = gio.step5_input(data) result =", "the whole # pipeline. for _ in data: pass end_time = time.time() log(\"====>", "\"\"\"Parse the -s, steps, option. Produces a list of strings.\"\"\" steps = steps.strip()", "to the same type as the existing parameter # value. That works nicely", "be integer number with an optional letter suffix... 
if re.match(r'^\\d+[a-z]?$', part): result.add(part) else:", "this is all handled in # the step5_input() function. data = gio.step5_input(data) result", "= time.time() cannot = [s for s in step_list if s not in", "def run_step3(data): from steps import step3 if data is None: data = gio.step3_input()", "result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3 if data", "a string, to the same type as the existing parameter # value. That", "estep1 if data is None: data = gio.step1_input() pre = estep1.pre_step1(data) result =", "/tmp/input will be used. dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2':", "result.update(str(s) for s in range(int(l), int(r) + 1)) except ValueError: # Expect to", "help=\"Do not save intermediate files in the work sub-directory\") options, args = parser.parse_args(arglist)", "options, args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps)", "for _ in data: pass end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run", "http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir", "2016-01-06 \"\"\"run.cgi [options] -- run steps of the GISTEMP algorithm. Options: --help Print", "and # is zipped up. 
data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result)", "!= 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take", "3 without re-running it.\"\"\" if data: raise Fatal(\"Expect to run 3c first in", "[options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps", "overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w')", "[str(x) for x in range(6)] result = set() for part in steps.split(','): #", "== bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r", "not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") #", "\"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\") parser.add_option('-p', '--parameter', action='append',", "value = value[1:-1] value = [int(x) for x in value.split(',')] value = type(x)(value)", "os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all", "s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit = \"STEPS", "progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level directories needed", "def run_step2(data): from steps import step2 if data is None: data = gio.step2_input()", "<NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of the", "steps.split(','): # Part can be integer number with an optional letter suffix... 
if", "intermediate files in the work sub-directory\") options, args = parser.parse_args(arglist) if len(args) !=", "arguments\") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a parameter string", "====\") log(\"Run took %.1f seconds\" % (end_time - start_time)) return 0 if __name__", "Each of the run_stepN functions below takes a data object, its input, #", "args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return", "4 always gets input data, ocean # temperatures, from disk; data from earlier", "steps are run in the order you specify. If this option is omitted,", "int(r) + 1)) except ValueError: # Expect to catch both # \"ValueError: too", "step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces a list", "steps import step3 if data is None: data = gio.step3_input() result = step3.step3(data)", "try: l, r = part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1))", "5 # we'll be changing the value of sys.stdout before calling other modules", "run_step1(data): from steps import step1 from extension import step1 as estep1 if data", "1)) except ValueError: # Expect to catch both # \"ValueError: too many values", "was, in order to # write its output, and hence suck data through", "strings.\"\"\" steps = steps.strip() if not steps: return [str(x) for x in range(6)]", "is omitted, run all steps in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os #", "number with an optional letter suffix... 
if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or", "if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args def", "fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if argv is", "= list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR", "return options, args def update_parameters(parm): \"\"\"Take a parameter string from the command line", "last step was, in order to # write its output, and hence suck", "in step_list: data = step_fn[step](data) # Consume the data in whatever the last", "# http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if", "-- run steps of the GISTEMP algorithm. Options: --help Print this text. --steps=STEPS", "argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage: %prog [options]\" parser", "result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces", "len(step_list) == 1: logit = \"STEP %s\" % step_list[0] else: assert len(step_list) >=", "for strings, ints, and floats... x = getattr(parameters, key) # ... but we", "parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args", "%r\" % p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" %", "order to # write its output, and hence suck data through the whole", "optparse usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\",", "value. That works nicely for strings, ints, and floats... 
x = getattr(parameters, key)", "not parm: return import parameters for p in parm: try: key, value =", "will be used. dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2': run_step2,", "an iterator # that feeds from a file. def run_step0(data): from steps import", "from steps import step2 if data is None: data = gio.step2_input() result =", "print(\"The GISTEMP procedure must be run from the root \" \"directory of the", "\"\"\"An alternative to Step 3 that reads (copies) the output file created by", "invalid literal for int() with base 10: 'a'\" # when int fails (\"1,a\")", "again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear", "\"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files in the work sub-directory\")", "= getattr(parameters, key) # ... but we need a hack for bool. if", "coerce it. elif value[0] == '(' and value[-1] == ')': value = value[1:-1]", "the value of sys.stdout before calling other modules that # use \"print\" to", "and try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import *", "we can log to it; in steps 2 and 5 # we'll be", "from a file. def run_step0(data): from steps import step0 if data is None:", "parameter # value. That works nicely for strings, ints, and floats... x =", "from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate", "is None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from", "# run.cgi -- run steps of the GISTEMP algorithm # # <NAME>, 2009-12-08", "as the existing parameter # value. That works nicely for strings, ints, and", "otherwise the files in /tmp/input will be used. 
dl_input_files() step_fn = { '0':", "--steps=STEPS Specify which steps to run, as a comma-separated list of numbers from", "the previous step, or an iterator # that feeds from a file. def", "Step 3 that reads (copies) the output file created by the Sordinary Step", "that reads (copies) the output file created by the Sordinary Step 3. Effectively", "data from earlier stages is land data and # is zipped up. data", "len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm):", "# Now value is 0 or 1 and the default case will correctly", "None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps", "%s and try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import", "stages is land data and # is zipped up. data = gio.step4_input(data) result", "it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path)", "if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range in the form '1-3'.", "you specify. If this option is omitted, run all steps in order. \"\"\"", "run_step2(data): from steps import step2 if data is None: data = gio.step2_input() result", "in steps.split(','): # Part can be integer number with an optional letter suffix...", "line and update the parameters module.\"\"\" if not parm: return import parameters for", "Summary ====\") log(\"Run took %.1f seconds\" % (end_time - start_time)) return 0 if", "step_fn[step](data) # Consume the data in whatever the last step was, in order", "steps 2 and 5 # we'll be changing the value of sys.stdout before", "# delete files in /tmp/input to re-download the input data files # otherwise", "import step0 if data is None: data = gio.step0_input() result = step0.step0(data) return", "key) # ... but we need a hack for bool. if type(x) ==", "bool. 
if type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise", "# Consume the data in whatever the last step was, in order to", "both # \"ValueError: too many values to unpack\" when the split # produces", "data files # otherwise the files in /tmp/input will be used. dl_input_files() step_fn", "'1-3'. try: l, r = part.split('-') result.update(str(s) for s in range(int(l), int(r) +", "[options] -- run steps of the GISTEMP algorithm. Options: --help Print this text.", "step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3 that reads (copies)", "in order to # write its output, and hence suck data through the", "= open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary", "+ '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level", "step_fn] if cannot: raise Fatal(\"Can't run steps %s\" % str(cannot)) # Create a", "the original standard output so we can log to it; in steps 2", "= [s for s in step_list if s not in step_fn] if cannot:", "suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range in the form", "= steps.strip() if not steps: return [str(x) for x in range(6)] result =", "# and produces a data object, its output. Ordinarily the data objects #", "stdout. if len(step_list) == 1: logit = \"STEP %s\" % step_list[0] else: assert", "result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3 that", "steps = steps.strip() if not steps: return [str(x) for x in range(6)] result", "if data: raise Fatal(\"Expect to run 3c first in pipeline.\") return gio.step3c_input() def", "# Step 5 takes a land mask as optional input, this is all", "floats... x = getattr(parameters, key) # ... 
but we need a hack for", "many values to unpack\" when the split # produces too many values (\"1-3-\"),", "= parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR):", "% step_list[0] else: assert len(step_list) >= 2 t = [str(s) for s in", "# write its output, and hence suck data through the whole # pipeline.", "object, its output. Ordinarily the data objects # are iterators, either produced from", "by the Sordinary Step 3. Effectively using the data produced by Step 3", "produced by Step 3 without re-running it.\"\"\" if data: raise Fatal(\"Expect to run", "p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" % key) #", "= \"STEP %s\" % step_list[0] else: assert len(step_list) >= 2 t = [str(s)", "a hack for bool. if type(x) == bool: try: value = ['false', 'true'].index(value.lower())", "step_fn = { '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c,", "\"STEP %s\" % step_list[0] else: assert len(step_list) >= 2 t = [str(s) for", "data: pass end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\"", "in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d) # delete files", "is None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps)", "raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage =", "= [int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters, key, value) #", "value is 0 or 1 and the default case will correctly # coerce", "http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP", "if cannot: raise Fatal(\"Can't run steps %s\" % str(cannot)) # Create a message", "logit = 
\"STEP %s\" % step_list[0] else: assert len(step_list) >= 2 t =", "Record the original standard output so we can log to it; in steps", "changing the value of sys.stdout before calling other modules that # use \"print\"", "gio.step1_output(post) def run_step2(data): from steps import step2 if data is None: data =", "= part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1)) except ValueError: #", "'4': run_step4, '5': run_step5, } # Record start time now, and ending times", "2 and 5 # we'll be changing the value of sys.stdout before calling", "for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d) #", "'3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } # Record start time", "can be integer number with an optional letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part)", "in the form '1-3'. try: l, r = part.split('-') result.update(str(s) for s in", "step1 as estep1 if data is None: data = gio.step1_input() pre = estep1.pre_step1(data)", "update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress", "= ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must be True or", "handled in # the step5_input() function. data = gio.step5_input(data) result = step5.step5(data) return", "Sordinary Step 3. Effectively using the data produced by Step 3 without re-running", "Now value is 0 or 1 and the default case will correctly #", "unless it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\" % path)", "project.\\nPlease change directory \" \"to %s and try again.\" % rootdir) sys.exit() except:", "this text. 
--steps=STEPS Specify which steps to run, as a comma-separated list of", "True or False\" % key) # Now value is 0 or 1 and", "without re-running it.\"\"\" if data: raise Fatal(\"Expect to run 3c first in pipeline.\")", "for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit =", "from steps import step0 if data is None: data = gio.step0_input() result =", "hack for bool. if type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except", "result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1 from extension", "gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def", "parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR)", "run_step3c, '4': run_step4, '5': run_step5, } # Record start time now, and ending", "run_step5, } # Record start time now, and ending times for each step.", "= \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select", "ValueError: raise Fatal(\"Boolean parameter %r must be True or False\" % key) #", "= None for step in step_list: data = step_fn[step](data) # Consume the data", "# # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps", "logfile = sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a')", "reads (copies) the output file created by the Sordinary Step 3. Effectively using", "if not os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path) # Each of", "# are iterators, either produced from the previous step, or an iterator #", "the step5_input() function. 
data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps):", "gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces a list of strings.\"\"\"", "[str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit", "_ in data: pass end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run took", "in data: pass end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f", "= step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1 from extension import", "ocean # temperatures, from disk; data from earlier stages is land data and", "help=\"Select range of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py", "step_list[-1]) else: logit = \"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\" %", "their output. logfile = sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR +", "def parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces a list of strings.\"\"\" steps", "import gio class Fatal(Exception): pass # Record the original standard output so we", "value) # Download input files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch()", "\"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\" % logit) data = None", "steps of the GISTEMP algorithm. Options: --help Print this text. --steps=STEPS Specify which", "for each step. 
start_time = time.time() cannot = [s for s in step_list", "%s\" % ', '.join(step_list) log(\"====> %s ====\" % logit) data = None for", "as estep1 if data is None: data = gio.step1_input() pre = estep1.pre_step1(data) result", "= sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress", "for x in range(6)] result = set() for part in steps.split(','): # Part", "os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure must be run from the", "os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path) # Each of the run_stepN", "'0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5':", "for stdout. if len(step_list) == 1: logit = \"STEP %s\" % step_list[0] else:", "the last step was, in order to # write its output, and hence", "http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd()", "# the step5_input() function. data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def", "the work sub-directory\") options, args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\")", "= sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg", "understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage: %prog", "temporary directories we're going to use. for d in ['log', 'result', 'work', \"input\"]:", "it. 
elif value[0] == '(' and value[-1] == ')': value = value[1:-1] value", "mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level directories needed to contain", "part): result.add(part) else: # Or a range in the form '1-3'. try: l,", "parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True,", "def run_step5(data): from steps import step5 # Step 5 takes a land mask", "try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * #", "it, unless it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\" %", "step3 if data is None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result)", "the files in /tmp/input will be used. dl_input_files() step_fn = { '0': run_step0,", "with base 10: 'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't understand steps", "not steps: return [str(x) for x in range(6)] result = set() for part", "from steps import step3 if data is None: data = gio.step3_input() result =", "from disk; data from earlier stages is land data and # is zipped", "# Clear Climate Code import gio class Fatal(Exception): pass # Record the original", "many values (\"1-3-\"), and # \"ValueError: invalid literal for int() with base 10:", "already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path) #", "run_step4, '5': run_step5, } # Record start time now, and ending times for", "5. For example, --steps=2,3,5 The steps are run in the order you specify.", "If this option is omitted, run all steps in order. 
\"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html", "% rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate", "(copies) the output file created by the Sordinary Step 3. Effectively using the", "in range(6)] result = set() for part in steps.split(','): # Part can be", "else: # Or a range in the form '1-3'. try: l, r =", "in pipeline.\") return gio.step3c_input() def run_step4(data): from steps import step4 # Unlike earlier", "to # write its output, and hence suck data through the whole #", "strings, ints, and floats... x = getattr(parameters, key) # ... but we need", "# Create a message for stdout. if len(step_list) == 1: logit = \"STEP", "output. logfile = sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt',", "the data in whatever the last step was, in order to # write", "option: %r\" % p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\"", "in value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download input files def", "action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do", "of the project.\\nPlease change directory \" \"to %s and try again.\" % rootdir)", "to re-download the input data files # otherwise the files in /tmp/input will", "that feeds from a file. def run_step0(data): from steps import step0 if data", "steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage: %prog [options]\"", "the existing parameter # value. That works nicely for strings, ints, and floats...", "generate their output. 
logfile = sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR", "int(step_list[-1]) + 1)] if step_list == t: logit = \"STEPS %s to %s\"", "\"ValueError: too many values to unpack\" when the split # produces too many", "list of strings.\"\"\" steps = steps.strip() if not steps: return [str(x) for x", "Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage:", "# <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of the GISTEMP algorithm.", "optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\") parser.add_option('-p',", "run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\",", "import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() !=", "catch both # \"ValueError: too many values to unpack\" when the split #", "try: key, value = p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter option:", "message for stdout. if len(step_list) == 1: logit = \"STEP %s\" % step_list[0]", "need a hack for bool. if type(x) == bool: try: value = ['false',", "'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and", "Climate Code import gio class Fatal(Exception): pass # Record the original standard output", "t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list ==", "# pipeline. 
for _ in data: pass end_time = time.time() log(\"====> Timing Summary", "first in pipeline.\") return gio.step3c_input() def run_step4(data): from steps import step4 # Unlike", "as a comma-separated list of numbers from 0 to 5. For example, --steps=2,3,5", "in step_list if s not in step_fn] if cannot: raise Fatal(\"Can't run steps", "len(step_list) >= 2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)]", "parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a parameter", "optional input, this is all handled in # the step5_input() function. data =", "For example, --steps=2,3,5 The steps are run in the order you specify. If", "correctly # coerce it. elif value[0] == '(' and value[-1] == ')': value", "logit = \"STEPS %s to %s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS", "\"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary directories we're going", "too many values to unpack\" when the split # produces too many values", "def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n')", "steps.strip() if not steps: return [str(x) for x in range(6)] result = set()", "key): raise Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce value, a string,", "in step_fn] if cannot: raise Fatal(\"Can't run steps %s\" % str(cannot)) # Create", "= os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure must be run from", "output file created by the Sordinary Step 3. 
Effectively using the data produced", "import step1 from extension import step1 as estep1 if data is None: data", "% path) os.makedirs(path) # Each of the run_stepN functions below takes a data", "log(\"Run took %.1f seconds\" % (end_time - start_time)) return 0 if __name__ ==", "sys.stdout def log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg +", "function. data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the", "part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1)) except ValueError: # Expect", "not in step_fn] if cannot: raise Fatal(\"Can't run steps %s\" % str(cannot)) #", "open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary directories", "step1 from extension import step1 as estep1 if data is None: data =", "order you specify. If this option is omitted, run all steps in order.", "x in range(6)] result = set() for part in steps.split(','): # Part can", "data is None: data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post", "} # Record start time now, and ending times for each step. start_time", "Step 5 takes a land mask as optional input, this is all handled", "from the root \" \"directory of the project.\\nPlease change directory \" \"to %s", "file created by the Sordinary Step 3. 
Effectively using the data produced by", "raise Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce value, a string, to", "False\" % key) # Now value is 0 or 1 and the default", "= estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2 if data is", "(\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse usage", "data = step_fn[step](data) # Consume the data in whatever the last step was,", "files in /tmp/input will be used. dl_input_files() step_fn = { '0': run_step0, '1':", "os.getcwd() != rootdir: print(\"The GISTEMP procedure must be run from the root \"", "Create all the temporary directories we're going to use. for d in ['log',", "modules that # use \"print\" to generate their output. logfile = sys.stdout def", "= set() for part in steps.split(','): # Part can be integer number with", "exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path) # Each", "the Sordinary Step 3. Effectively using the data produced by Step 3 without", "os.makedirs(path) # Each of the run_stepN functions below takes a data object, its", "# Create all the temporary directories we're going to use. for d in", "cannot = [s for s in step_list if s not in step_fn] if", "output so we can log to it; in steps 2 and 5 #", "input data, ocean # temperatures, from disk; data from earlier stages is land", "input data files # otherwise the files in /tmp/input will be used. dl_input_files()", "step_list if s not in step_fn] if cannot: raise Fatal(\"Can't run steps %s\"", "in /tmp/input will be used. 
dl_input_files() step_fn = { '0': run_step0, '1': run_step1,", "to Step 3 that reads (copies) the output file created by the Sordinary", "rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate Code", "os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create", "when int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist):", "data and # is zipped up. data = gio.step4_input(data) result = step4.step4(data) return", "import * # Clear Climate Code import gio class Fatal(Exception): pass # Record", "def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level directories needed to", "parameter %r must be True or False\" % key) # Now value is", "= gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post)", "delete files in /tmp/input to re-download the input data files # otherwise the", "\"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level directories needed to contain it,", "literal for int() with base 10: 'a'\" # when int fails (\"1,a\") raise", "x in value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download input files", "except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate Code import gio", "Download input files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None):", "'1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, }", "gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1 from", "step_list = list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): 
os.makedirs(PROGRESS_DIR) progress =", "<NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of the GISTEMP algorithm. Options:", "gio.step2_output(result) def run_step3(data): from steps import step3 if data is None: data =", "!= rootdir: print(\"The GISTEMP procedure must be run from the root \" \"directory", "import step4 # Unlike earlier steps, Step 4 always gets input data, ocean", "# is zipped up. data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def", "run_stepN functions below takes a data object, its input, # and produces a", "+ 1)] if step_list == t: logit = \"STEPS %s to %s\" %", "['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d) # delete files in", "times for each step. start_time = time.time() cannot = [s for s in", "key) # Coerce value, a string, to the same type as the existing", "# temperatures, from disk; data from earlier stages is land data and #", "re-download the input data files # otherwise the files in /tmp/input will be", "will correctly # coerce it. elif value[0] == '(' and value[-1] == ')':", "raise Fatal(\"Can't run steps %s\" % str(cannot)) # Create a message for stdout.", "of the GISTEMP algorithm. Options: --help Print this text. --steps=STEPS Specify which steps", "sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate Code import", "try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must be", "files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time", "That works nicely for strings, ints, and floats... 
x = getattr(parameters, key) #", "run steps of the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision", "% key) # Now value is 0 or 1 and the default case", "# produces too many values (\"1-3-\"), and # \"ValueError: invalid literal for int()", "progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary directories we're going to use.", "%s\" % path) os.makedirs(path) # Each of the run_stepN functions below takes a", "alternative to Step 3 that reads (copies) the output file created by the", "# Or a range in the form '1-3'. try: l, r = part.split('-')", "for p in parm: try: key, value = p.split('=', 1) except ValueError: raise", "default=True, dest=\"save_work\", help=\"Do not save intermediate files in the work sub-directory\") options, args", "10: 'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return", "with an optional letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a", "save intermediate files in the work sub-directory\") options, args = parser.parse_args(arglist) if len(args)", "took %.1f seconds\" % (end_time - start_time)) return 0 if __name__ == '__main__':", "of numbers from 0 to 5. For example, --steps=2,3,5 The steps are run", "not save intermediate files in the work sub-directory\") options, args = parser.parse_args(arglist) if", "of the run_stepN functions below takes a data object, its input, # and", "1)] if step_list == t: logit = \"STEPS %s to %s\" % (step_list[0],", "-s, steps, option. 
Produces a list of strings.\"\"\" steps = steps.strip() if not", "\"ValueError: invalid literal for int() with base 10: 'a'\" # when int fails", "temperatures, from disk; data from earlier stages is land data and # is", "Code import gio class Fatal(Exception): pass # Record the original standard output so", "files in /tmp/input to re-download the input data files # otherwise the files", "0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a", "args def update_parameters(parm): \"\"\"Take a parameter string from the command line and update", "the parameters module.\"\"\" if not parm: return import parameters for p in parm:", "object, its input, # and produces a data object, its output. Ordinarily the", "r = part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1)) except ValueError:", "% p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" % key)", "step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3 if data is None:", "s in range(int(l), int(r) + 1)) except ValueError: # Expect to catch both", "of sys.stdout before calling other modules that # use \"print\" to generate their", "Produces a list of strings.\"\"\" steps = steps.strip() if not steps: return [str(x)", "to run 3c first in pipeline.\") return gio.step3c_input() def run_step4(data): from steps import", "run.cgi -- run steps of the GISTEMP algorithm # # <NAME>, 2009-12-08 #", "feeds from a file. 
def run_step0(data): from steps import step0 if data is", "+ 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory", "p in parm: try: key, value = p.split('=', 1) except ValueError: raise Fatal(\"Can't", "be True or False\" % key) # Now value is 0 or 1", "value = type(x)(value) setattr(parameters, key, value) # Download input files def dl_input_files(): import", "# otherwise the files in /tmp/input will be used. dl_input_files() step_fn = {", "ending times for each step. start_time = time.time() cannot = [s for s", "return gio.step1_output(post) def run_step2(data): from steps import step2 if data is None: data", "# http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The", "# Expect to catch both # \"ValueError: too many values to unpack\" when", "iterator # that feeds from a file. def run_step0(data): from steps import step0", "steps, Step 4 always gets input data, ocean # temperatures, from disk; data", "except ValueError: raise Fatal(\"Boolean parameter %r must be True or False\" % key)", "raise Fatal(\"Boolean parameter %r must be True or False\" % key) # Now", "\"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys", "assert len(step_list) >= 2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) +", "update_parameters(parm): \"\"\"Take a parameter string from the command line and update the parameters", "path) os.makedirs(path) # Each of the run_stepN functions below takes a data object,", "import optparse usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\",", "the data produced by Step 3 without re-running it.\"\"\" if data: raise 
Fatal(\"Expect", "all handled in # the step5_input() function. data = gio.step5_input(data) result = step5.step5(data)", "\"directory of the project.\\nPlease change directory \" \"to %s and try again.\" %", "Step 3. Effectively using the data produced by Step 3 without re-running it.\"\"\"", "parameter string from the command line and update the parameters module.\"\"\" if not", "parm: try: key, value = p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter", "progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the", "all the temporary directories we're going to use. for d in ['log', 'result',", "sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure must", "def update_parameters(parm): \"\"\"Take a parameter string from the command line and update the", "the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options]", "', '.join(step_list) log(\"====> %s ====\" % logit) data = None for step in", "argv is None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list =", "step, or an iterator # that feeds from a file. def run_step0(data): from", "suck data through the whole # pipeline. for _ in data: pass end_time", "if s not in step_fn] if cannot: raise Fatal(\"Can't run steps %s\" %", "but we need a hack for bool. if type(x) == bool: try: value", "Record start time now, and ending times for each step. start_time = time.time()", "%r must be True or False\" % key) # Now value is 0", "the root \" \"directory of the project.\\nPlease change directory \" \"to %s and", "parameters...\\n\\n\") # Create all the temporary directories we're going to use. 
for d", "dest=\"save_work\", help=\"Do not save intermediate files in the work sub-directory\") options, args =", "Expect to catch both # \"ValueError: too many values to unpack\" when the", "%s ====\" % logit) data = None for step in step_list: data =", "The steps are run in the order you specify. If this option is", "which steps to run, as a comma-separated list of numbers from 0 to", "values to unpack\" when the split # produces too many values (\"1-3-\"), and", "by Step 3 without re-running it.\"\"\" if data: raise Fatal(\"Expect to run 3c", "= estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from", "+ d) # delete files in /tmp/input to re-download the input data files", "int() with base 10: 'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't understand", "parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a parameter string from the command", "logit = \"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\" % logit) data", "step_list[0] else: assert len(step_list) >= 2 t = [str(s) for s in range(int(step_list[0]),", "= p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter option: %r\" % p)", "# ... but we need a hack for bool. if type(x) == bool:", "form '1-3'. try: l, r = part.split('-') result.update(str(s) for s in range(int(l), int(r)", "== t: logit = \"STEPS %s to %s\" % (step_list[0], step_list[-1]) else: logit", "from steps import step4 # Unlike earlier steps, Step 4 always gets input", "Coerce value, a string, to the same type as the existing parameter #", "sub-directory\") options, args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps =", "the default case will correctly # coerce it. 
elif value[0] == '(' and", "pipeline.\") return gio.step3c_input() def run_step4(data): from steps import step4 # Unlike earlier steps,", "elif value[0] == '(' and value[-1] == ')': value = value[1:-1] value =", "'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH,", "run 3c first in pipeline.\") return gio.step3c_input() def run_step4(data): from steps import step4", "use \"print\" to generate their output. logfile = sys.stdout def log(msg): print(msg, file=logfile)", "Step 4 always gets input data, ocean # temperatures, from disk; data from", "\"STEPS %s to %s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS %s\" %", "run_step3(data): from steps import step3 if data is None: data = gio.step3_input() result", "original standard output so we can log to it; in steps 2 and", "re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir:", "the -s, steps, option. Produces a list of strings.\"\"\" steps = steps.strip() if", "produces too many values (\"1-3-\"), and # \"ValueError: invalid literal for int() with", "%r\" % key) # Coerce value, a string, to the same type as", "fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import optparse", "must be True or False\" % key) # Now value is 0 or", "os if argv is None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter)", "Step 3 without re-running it.\"\"\" if data: raise Fatal(\"Expect to run 3c first", "import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure", "algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run", "write its output, and hence suck data through the whole # pipeline. 
for", "from the previous step, or an iterator # that feeds from a file.", "os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))", "steps in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re #", "= step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2", "# Unlike earlier steps, Step 4 always gets input data, ocean # temperatures,", "to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\",", "or False\" % key) # Now value is 0 or 1 and the", "dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os", "'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d) # delete files in /tmp/input", "and ending times for each step. start_time = time.time() cannot = [s for", "options.steps = parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a parameter string from", "data = None for step in step_list: data = step_fn[step](data) # Consume the", "either produced from the previous step, or an iterator # that feeds from", "zipped up. data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from", "run steps of the GISTEMP algorithm. Options: --help Print this text. --steps=STEPS Specify", "ints, and floats... x = getattr(parameters, key) # ... but we need a", "created by the Sordinary Step 3. Effectively using the data produced by Step", "is None: data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post =", "... but we need a hack for bool. 
if type(x) == bool: try:", "# Part can be integer number with an optional letter suffix... if re.match(r'^\\d+[a-z]?$',", "Specify which steps to run, as a comma-separated list of numbers from 0", "= gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1", "'3c': run_step3c, '4': run_step4, '5': run_step5, } # Record start time now, and", "steps: return [str(x) for x in range(6)] result = set() for part in", "all steps in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re", "gio.step3c_input() def run_step4(data): from steps import step4 # Unlike earlier steps, Step 4", "= optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\")", "step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2 if", "run_step0(data): from steps import step0 if data is None: data = gio.step0_input() result", "int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def parse_options(arglist): import", "except ValueError: raise Fatal(\"Can't understand parameter option: %r\" % p) if not hasattr(parameters,", "going to use. for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/'", "# Record the original standard output so we can log to it; in", "create the directory PATH, and all intermediate-level directories needed to contain it, unless", "# coerce it. elif value[0] == '(' and value[-1] == ')': value =", "pass end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\" %", "= open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create", "its output. 
Ordinarily the data objects # are iterators, either produced from the", "file. def run_step0(data): from steps import step0 if data is None: data =", "parameter option: %r\" % p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter", "[s for s in step_list if s not in step_fn] if cannot: raise", "gets input data, ocean # temperatures, from disk; data from earlier stages is", "def main(argv=None): import time import os if argv is None: argv = sys.argv", "# value. That works nicely for strings, ints, and floats... x = getattr(parameters,", "string, to the same type as the existing parameter # value. That works", "== ')': value = value[1:-1] value = [int(x) for x in value.split(',')] value", "GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] --", "letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range in the", "output. Ordinarily the data objects # are iterators, either produced from the previous", "data: raise Fatal(\"Expect to run 3c first in pipeline.\") return gio.step3c_input() def run_step4(data):", "%prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of", "= fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if argv is None:", "value, a string, to the same type as the existing parameter # value.", "value[-1] == ')': value = value[1:-1] value = [int(x) for x in value.split(',')]", "used. dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3,", "data, ocean # temperatures, from disk; data from earlier stages is land data", "import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try: rootdir =", "and produces a data object, its output. 
Ordinarily the data objects # are", "command line and update the parameters module.\"\"\" if not parm: return import parameters", "# <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of", "value = p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter option: %r\" %", "from 0 to 5. For example, --steps=2,3,5 The steps are run in the", "data object, its input, # and produces a data object, its output. Ordinarily", "log(msg): print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush()", "earlier steps, Step 4 always gets input data, ocean # temperatures, from disk;", "= { '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4':", "step2 if data is None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result)", "data through the whole # pipeline. for _ in data: pass end_time =", "import time import os if argv is None: argv = sys.argv options, args", "= time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\" % (end_time -", "from steps import step1 from extension import step1 as estep1 if data is", "raise Fatal(\"Can't understand parameter option: %r\" % p) if not hasattr(parameters, key): raise", "we need a hack for bool. if type(x) == bool: try: value =", "for part in steps.split(','): # Part can be integer number with an optional", "rootdir: print(\"The GISTEMP procedure must be run from the root \" \"directory of", "nicely for strings, ints, and floats... x = getattr(parameters, key) # ... 
but", "2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of the GISTEMP", "a data object, its input, # and produces a data object, its output.", "during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files in", "of the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06 \"\"\"run.cgi", "a message for stdout. if len(step_list) == 1: logit = \"STEP %s\" %", "run from the root \" \"directory of the project.\\nPlease change directory \" \"to", "list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage)", "and value[-1] == ')': value = value[1:-1] value = [int(x) for x in", "creating directory %s\" % path) os.makedirs(path) # Each of the run_stepN functions below", "to use. for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' +", "def run_step0(data): from steps import step0 if data is None: data = gio.step0_input()", "data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s,", "it.\"\"\" if data: raise Fatal(\"Expect to run 3c first in pipeline.\") return gio.step3c_input()", "if data is None: data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre)", "optional letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range in", "in parm: try: key, value = p.split('=', 1) except ValueError: raise Fatal(\"Can't understand", "its input, # and produces a data object, its output. Ordinarily the data", "case will correctly # coerce it. elif value[0] == '(' and value[-1] ==", "be used. 
dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2': run_step2, '3':", "hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce value, a", "pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data):", "Unlike earlier steps, Step 4 always gets input data, ocean # temperatures, from", "input, # and produces a data object, its output. Ordinarily the data objects", "and update the parameters module.\"\"\" if not parm: return import parameters for p", "import os if argv is None: argv = sys.argv options, args = parse_options(argv[1:])", "module.\"\"\" if not parm: return import parameters for p in parm: try: key,", "the split # produces too many values (\"1-3-\"), and # \"ValueError: invalid literal", "through the whole # pipeline. for _ in data: pass end_time = time.time()", "and 5 # we'll be changing the value of sys.stdout before calling other", "+ \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary directories we're", "if data is None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def", "return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3 that reads (copies) the", "range(int(l), int(r) + 1)) except ValueError: # Expect to catch both # \"ValueError:", "%s to %s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS %s\" % ',", "for s in step_list if s not in step_fn] if cannot: raise Fatal(\"Can't", "'.join(step_list) log(\"====> %s ====\" % logit) data = None for step in step_list:", "Options: --help Print this text. 
--steps=STEPS Specify which steps to run, as a", "help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not", "if step_list == t: logit = \"STEPS %s to %s\" % (step_list[0], step_list[-1])", "in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html", "l, r = part.split('-') result.update(str(s) for s in range(int(l), int(r) + 1)) except", "import parameters for p in parm: try: key, value = p.split('=', 1) except", "# we'll be changing the value of sys.stdout before calling other modules that", "Ordinarily the data objects # are iterators, either produced from the previous step,", "functions below takes a data object, its input, # and produces a data", "numbers from 0 to 5. For example, --steps=2,3,5 The steps are run in", "data in whatever the last step was, in order to # write its", "# Coerce value, a string, to the same type as the existing parameter", "in range(int(l), int(r) + 1)) except ValueError: # Expect to catch both #", "'work', \"input\"]: mkdir(TMP_DIR + '/' + d) # delete files in /tmp/input to", "progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all", "comma-separated list of numbers from 0 to 5. For example, --steps=2,3,5 The steps", "# when int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result)) def", "are iterators, either produced from the previous step, or an iterator # that", "return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option. 
Produces a list of", "return gio.step0_output(result) def run_step1(data): from steps import step1 from extension import step1 as", "\" \"to %s and try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from", "integer number with an optional letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: #", "= parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps = parse_steps(options.steps) return options,", "and the default case will correctly # coerce it. elif value[0] == '('", "run steps %s\" % str(cannot)) # Create a message for stdout. if len(step_list)", "from steps import step5 # Step 5 takes a land mask as optional", "the order you specify. If this option is omitted, run all steps in", "data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import", "main(argv=None): import time import os if argv is None: argv = sys.argv options,", "= step_fn[step](data) # Consume the data in whatever the last step was, in", "value = [int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters, key, value)", "# # run.cgi -- run steps of the GISTEMP algorithm # # <NAME>,", "if data is None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def", "the output file created by the Sordinary Step 3. Effectively using the data", "step5_input() function. data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse", "the GISTEMP algorithm. Options: --help Print this text. 
--steps=STEPS Specify which steps to", "action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine", "standard output so we can log to it; in steps 2 and 5", "if not steps: return [str(x) for x in range(6)] result = set() for", "% ', '.join(step_list) log(\"====> %s ====\" % logit) data = None for step", "return gio.step2_output(result) def run_step3(data): from steps import step3 if data is None: data", "====\" % logit) data = None for step in step_list: data = step_fn[step](data)", "parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\") parser.add_option('-p', '--parameter',", "re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range in the form '1-3'. try:", "d) # delete files in /tmp/input to re-download the input data files #", "run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } # Record start", "gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3", "Fatal(\"Can't run steps %s\" % str(cannot)) # Create a message for stdout. if", "of strings.\"\"\" steps = steps.strip() if not steps: return [str(x) for x in", "order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import", "a file. def run_step0(data): from steps import step0 if data is None: data", "directories needed to contain it, unless it already exists.\"\"\" if not os.path.isdir(path): log(\"...", "= step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3 if data is", "options, args def update_parameters(parm): \"\"\"Take a parameter string from the command line and", "\"print\" to generate their output. 
logfile = sys.stdout def log(msg): print(msg, file=logfile) progress", "'--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\",", "Timing Summary ====\") log(\"Run took %.1f seconds\" % (end_time - start_time)) return 0", "disk; data from earlier stages is land data and # is zipped up.", "key, value = p.split('=', 1) except ValueError: raise Fatal(\"Can't understand parameter option: %r\"", "pipeline. for _ in data: pass end_time = time.time() log(\"====> Timing Summary ====\")", "result = set() for part in steps.split(','): # Part can be integer number", "GISTEMP algorithm. Options: --help Print this text. --steps=STEPS Specify which steps to run,", "range(6)] result = set() for part in steps.split(','): # Part can be integer", "# use \"print\" to generate their output. logfile = sys.stdout def log(msg): print(msg,", "the same type as the existing parameter # value. That works nicely for", "time import os if argv is None: argv = sys.argv options, args =", "omitted, run all steps in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html", "\"to %s and try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd()) from settings", "s in step_list if s not in step_fn] if cannot: raise Fatal(\"Can't run", "Or a range in the form '1-3'. 
try: l, r = part.split('-') result.update(str(s)", "string from the command line and update the parameters module.\"\"\" if not parm:", "Fatal(\"Can't understand parameter option: %r\" % p) if not hasattr(parameters, key): raise Fatal(\"Ignoring", "directory %s\" % path) os.makedirs(path) # Each of the run_stepN functions below takes", "post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2 if data", "to catch both # \"ValueError: too many values to unpack\" when the split", "the project.\\nPlease change directory \" \"to %s and try again.\" % rootdir) sys.exit()", "return import parameters for p in parm: try: key, value = p.split('=', 1)", "bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must", "in steps 2 and 5 # we'll be changing the value of sys.stdout", "sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup", "'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must be True or False\" %", "% (step_list[0], step_list[-1]) else: logit = \"STEPS %s\" % ', '.join(step_list) log(\"====> %s", "hence suck data through the whole # pipeline. for _ in data: pass", "to %s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS %s\" % ', '.join(step_list)", "steps %s\" % str(cannot)) # Create a message for stdout. if len(step_list) ==", "option is omitted, run all steps in order. 
\"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os", "list(options.steps) # overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR +", "Fatal(\"Boolean parameter %r must be True or False\" % key) # Now value", "base 10: 'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\")", "steps import step1 from extension import step1 as estep1 if data is None:", "1: logit = \"STEP %s\" % step_list[0] else: assert len(step_list) >= 2 t", "# Each of the run_stepN functions below takes a data object, its input,", "to contain it, unless it already exists.\"\"\" if not os.path.isdir(path): log(\"... creating directory", "run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files in the", "None: data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result)", "is None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data): from", "objects # are iterators, either produced from the previous step, or an iterator", "t: logit = \"STEPS %s to %s\" % (step_list[0], step_list[-1]) else: logit =", "return gio.step4_output(result) def run_step5(data): from steps import step5 # Step 5 takes a", "use. for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d)", "getattr(parameters, key) # ... but we need a hack for bool. if type(x)", "for step in step_list: data = step_fn[step](data) # Consume the data in whatever", "from earlier stages is land data and # is zipped up. 
data =", "type(x)(value) setattr(parameters, key, value) # Download input files def dl_input_files(): import fetch fetcher", "%s\" % step_list[0] else: assert len(step_list) >= 2 t = [str(s) for s", "PATH, and all intermediate-level directories needed to contain it, unless it already exists.\"\"\"", "for int() with base 10: 'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't", "parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to", "print(msg, file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def", "input, this is all handled in # the step5_input() function. data = gio.step5_input(data)", "'/' + d) # delete files in /tmp/input to re-download the input data", "pass # Record the original standard output so we can log to it;", "estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import step2 if data is None:", "= \"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\" % logit) data =", "intermediate-level directories needed to contain it, unless it already exists.\"\"\" if not os.path.isdir(path):", "data is None: data = gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data):", "dl_input_files() step_fn = { '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c':", "gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3 if", "\"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\", help=\"Select range", "be run from the root \" \"directory of the project.\\nPlease change directory \"", "Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce value, a string, to the", "x = getattr(parameters, key) # ... 
but we need a hack for bool.", "if argv is None: argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list", "s not in step_fn] if cannot: raise Fatal(\"Can't run steps %s\" % str(cannot))", "steps import step4 # Unlike earlier steps, Step 4 always gets input data,", "+ '/' + d) # delete files in /tmp/input to re-download the input", "Fatal(Exception): pass # Record the original standard output so we can log to", "raise Fatal(\"Expect to run 3c first in pipeline.\") return gio.step3c_input() def run_step4(data): from", "import step5 # Step 5 takes a land mask as optional input, this", "fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if argv is None: argv", "algorithm. Options: --help Print this text. --steps=STEPS Specify which steps to run, as", "{ '0': run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4,", "mkdir(TMP_DIR + '/' + d) # delete files in /tmp/input to re-download the", "when the split # produces too many values (\"1-3-\"), and # \"ValueError: invalid", "takes a data object, its input, # and produces a data object, its", "+ 1)) except ValueError: # Expect to catch both # \"ValueError: too many", "return [str(x) for x in range(6)] result = set() for part in steps.split(','):", "other modules that # use \"print\" to generate their output. 
logfile = sys.stdout", "\" \"directory of the project.\\nPlease change directory \" \"to %s and try again.\"", "understand parameter option: %r\" % p) if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown", "input files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import", "gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to Step 3 that reads (copies) the output", "def run_step4(data): from steps import step4 # Unlike earlier steps, Step 4 always", "if not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce", "%s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS %s\" % ', '.join(step_list) log(\"====>", "for s in range(int(l), int(r) + 1)) except ValueError: # Expect to catch", "example, --steps=2,3,5 The steps are run in the order you specify. If this", "from the command line and update the parameters module.\"\"\" if not parm: return", "--steps=2,3,5 The steps are run in the order you specify. If this option", "1) except ValueError: raise Fatal(\"Can't understand parameter option: %r\" % p) if not", "its output, and hence suck data through the whole # pipeline. for _", "data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import", "%.1f seconds\" % (end_time - start_time)) return 0 if __name__ == '__main__': sys.exit(main())", "we're going to use. for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR +", "range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t: logit = \"STEPS %s to", "root \" \"directory of the project.\\nPlease change directory \" \"to %s and try", "Effectively using the data produced by Step 3 without re-running it.\"\"\" if data:", "this option is omitted, run all steps in order. 
\"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import", "set() for part in steps.split(','): # Part can be integer number with an", "if not parm: return import parameters for p in parm: try: key, value", "step4 # Unlike earlier steps, Step 4 always gets input data, ocean #", "files # otherwise the files in /tmp/input will be used. dl_input_files() step_fn =", "Create a message for stdout. if len(step_list) == 1: logit = \"STEP %s\"", "args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if not", "if os.getcwd() != rootdir: print(\"The GISTEMP procedure must be run from the root", "land mask as optional input, this is all handled in # the step5_input()", "so we can log to it; in steps 2 and 5 # we'll", "list of numbers from 0 to 5. For example, --steps=2,3,5 The steps are", "from extension import step1 as estep1 if data is None: data = gio.step1_input()", "/tmp/input to re-download the input data files # otherwise the files in /tmp/input", ">= 2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if", "step_list: data = step_fn[step](data) # Consume the data in whatever the last step", "always gets input data, ocean # temperatures, from disk; data from earlier stages", "progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH):", "is None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An", "not hasattr(parameters, key): raise Fatal(\"Ignoring unknown parameter %r\" % key) # Coerce value,", "time now, and ending times for each step. start_time = time.time() cannot =", "the run_stepN functions below takes a data object, its input, # and produces", "same type as the existing parameter # value. That works nicely for strings,", "and floats... x = getattr(parameters, key) # ... 
but we need a hack", "estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps", "run all steps in order. \"\"\" # http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import", "0 to 5. For example, --steps=2,3,5 The steps are run in the order", "the input data files # otherwise the files in /tmp/input will be used.", "3c first in pipeline.\") return gio.step3c_input() def run_step4(data): from steps import step4 #", "all intermediate-level directories needed to contain it, unless it already exists.\"\"\" if not", "key) # Now value is 0 or 1 and the default case will", "result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5 # Step", "run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } # Record start time now,", "change directory \" \"to %s and try again.\" % rootdir) sys.exit() except: sys.exit()", "ValueError: raise Fatal(\"Can't understand parameter option: %r\" % p) if not hasattr(parameters, key):", "parameters/*.py during run\") parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files", "time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\" % (end_time - start_time))", "key, value) # Download input files def dl_input_files(): import fetch fetcher = fetch.Fetcher()", "steps of the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>, Revision 2016-01-06", "start time now, and ending times for each step. start_time = time.time() cannot", "run, as a comma-separated list of numbers from 0 to 5. 
For example,", "')': value = value[1:-1] value = [int(x) for x in value.split(',')] value =", "setattr(parameters, key, value) # Download input files def dl_input_files(): import fetch fetcher =", "= gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps,", "settings import * # Clear Climate Code import gio class Fatal(Exception): pass #", "work sub-directory\") options, args = parser.parse_args(arglist) if len(args) != 0: parser.error(\"Unexpected arguments\") options.steps", "run_step4(data): from steps import step4 # Unlike earlier steps, Step 4 always gets", "default case will correctly # coerce it. elif value[0] == '(' and value[-1]", "(step_list[0], step_list[-1]) else: logit = \"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\"", "def parse_options(arglist): import optparse usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\",", "data objects # are iterators, either produced from the previous step, or an", "return list(sorted(result)) def parse_options(arglist): import optparse usage = \"usage: %prog [options]\" parser =", "existing parameter # value. That works nicely for strings, ints, and floats... x", "sys.stdout before calling other modules that # use \"print\" to generate their output.", "is land data and # is zipped up. data = gio.step4_input(data) result =", "if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up parameters...\\n\\n\")", "as optional input, this is all handled in # the step5_input() function. data", "steps, option. 
Produces a list of strings.\"\"\" steps = steps.strip() if not steps:", "rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure must be run", "= [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list == t:", "try: rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) if os.getcwd() != rootdir: print(\"The GISTEMP procedure must be", "land data and # is zipped up. data = gio.step4_input(data) result = step4.step4(data)", "# \"ValueError: too many values to unpack\" when the split # produces too", "parameter %r\" % key) # Coerce value, a string, to the same type", "value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must be True", "and hence suck data through the whole # pipeline. for _ in data:", "(\"1-3-\"), and # \"ValueError: invalid literal for int() with base 10: 'a'\" #", "0 or 1 and the default case will correctly # coerce it. elif", "data is None: data = gio.step0_input() result = step0.step0(data) return gio.step0_output(result) def run_step1(data):", "Clear Climate Code import gio class Fatal(Exception): pass # Record the original standard", "directory PATH, and all intermediate-level directories needed to contain it, unless it already", "to it; in steps 2 and 5 # we'll be changing the value", "is zipped up. data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data):", "log(\"... creating directory %s\" % path) os.makedirs(path) # Each of the run_stepN functions", "data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data): \"\"\"An alternative to", "['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean parameter %r must be True or False\"", "step_list == t: logit = \"STEPS %s to %s\" % (step_list[0], step_list[-1]) else:", "specify. 
If this option is omitted, run all steps in order. \"\"\" #", "parser.add_option(\"--no-work_files\", \"--suppress-work-files\", action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files in the work", "run_step0, '1': run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5,", "step in step_list: data = step_fn[step](data) # Consume the data in whatever the", "if len(step_list) == 1: logit = \"STEP %s\" % step_list[0] else: assert len(step_list)", "# that feeds from a file. def run_step0(data): from steps import step0 if", "range of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during", "to generate their output. logfile = sys.stdout def log(msg): print(msg, file=logfile) progress =", "a comma-separated list of numbers from 0 to 5. For example, --steps=2,3,5 The", "= value[1:-1] value = [int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters,", "value of sys.stdout before calling other modules that # use \"print\" to generate", "* # Clear Climate Code import gio class Fatal(Exception): pass # Record the", "data is None: data = gio.step3_input() result = step3.step3(data) return gio.step3_output(result) def run_step3c(data):", "class Fatal(Exception): pass # Record the original standard output so we can log", "import step3 if data is None: data = gio.step3_input() result = step3.step3(data) return", "[int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download", "3. 
Effectively using the data produced by Step 3 without re-running it.\"\"\" if", "parameters for p in parm: try: key, value = p.split('=', 1) except ValueError:", "a parameter string from the command line and update the parameters module.\"\"\" if", "progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting", "must be run from the root \" \"directory of the project.\\nPlease change directory", "text. --steps=STEPS Specify which steps to run, as a comma-separated list of numbers", "for x in value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download input", "#!/usr/local/bin/python3.4 # # run.cgi -- run steps of the GISTEMP algorithm # #", "GISTEMP procedure must be run from the root \" \"directory of the project.\\nPlease", "to 5. For example, --steps=2,3,5 The steps are run in the order you", "gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5 #", "fetcher.fetch() def main(argv=None): import time import os if argv is None: argv =", "re-running it.\"\"\" if data: raise Fatal(\"Expect to run 3c first in pipeline.\") return", "'a'\" # when int fails (\"1,a\") raise Fatal(\"Can't understand steps argument.\") return list(sorted(result))", "output, and hence suck data through the whole # pipeline. for _ in", "log to it; in steps 2 and 5 # we'll be changing the", "% logit) data = None for step in step_list: data = step_fn[step](data) #", "to run, as a comma-separated list of numbers from 0 to 5. For", "= \"STEPS %s to %s\" % (step_list[0], step_list[-1]) else: logit = \"STEPS %s\"", "import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import os if", "step. start_time = time.time() cannot = [s for s in step_list if s", "Revision 2016-01-06 \"\"\"run.cgi [options] -- run steps of the GISTEMP algorithm. 
Options: --help", "file=logfile) progress = open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path):", "%s\" % str(cannot)) # Create a message for stdout. if len(step_list) == 1:", "'2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } # Record", "gio class Fatal(Exception): pass # Record the original standard output so we can", "a land mask as optional input, this is all handled in # the", "action=\"store_false\", default=True, dest=\"save_work\", help=\"Do not save intermediate files in the work sub-directory\") options,", "d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR + '/' + d) # delete", "data object, its output. Ordinarily the data objects # are iterators, either produced", "if type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except ValueError: raise Fatal(\"Boolean", "--help Print this text. --steps=STEPS Specify which steps to run, as a comma-separated", "using the data produced by Step 3 without re-running it.\"\"\" if data: raise", "and # \"ValueError: invalid literal for int() with base 10: 'a'\" # when", "else: logit = \"STEPS %s\" % ', '.join(step_list) log(\"====> %s ====\" % logit)", "in the order you specify. If this option is omitted, run all steps", "run_step5(data): from steps import step5 # Step 5 takes a land mask as", "# \"ValueError: invalid literal for int() with base 10: 'a'\" # when int", "metavar=\"S[,S]\", default=\"\", help=\"Select range of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter", "value[1:-1] value = [int(x) for x in value.split(',')] value = type(x)(value) setattr(parameters, key,", "parameters module.\"\"\" if not parm: return import parameters for p in parm: try:", "argv = sys.argv options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite", "\"\"\"run.cgi [options] -- run steps of the GISTEMP algorithm. 
Options: --help Print this", "5 takes a land mask as optional input, this is all handled in", "= gio.step2_input() result = step2.step2(data) return gio.step2_output(result) def run_step3(data): from steps import step3", "sys.exit() sys.path.append(os.getcwd()) from settings import * # Clear Climate Code import gio class", "each step. start_time = time.time() cannot = [s for s in step_list if", "to unpack\" when the split # produces too many values (\"1-3-\"), and #", "mask as optional input, this is all handled in # the step5_input() function.", "steps import step0 if data is None: data = gio.step0_input() result = step0.step0(data)", "def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def main(argv=None): import time import", "step0.step0(data) return gio.step0_output(result) def run_step1(data): from steps import step1 from extension import step1", "Fatal(\"Expect to run 3c first in pipeline.\") return gio.step3c_input() def run_step4(data): from steps", "= step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5 # Step 5", "% str(cannot)) # Create a message for stdout. if len(step_list) == 1: logit", "an optional letter suffix... if re.match(r'^\\d+[a-z]?$', part): result.add(part) else: # Or a range", "# Download input files def dl_input_files(): import fetch fetcher = fetch.Fetcher() fetcher.fetch() def", "extension import step1 as estep1 if data is None: data = gio.step1_input() pre", "2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1]) + 1)] if step_list", "files in the work sub-directory\") options, args = parser.parse_args(arglist) if len(args) != 0:", "import step2 if data is None: data = gio.step2_input() result = step2.step2(data) return", "\"input\"]: mkdir(TMP_DIR + '/' + d) # delete files in /tmp/input to re-download", "a data object, its output. Ordinarily the data objects # are iterators, either", "range in the form '1-3'. 
try: l, r = part.split('-') result.update(str(s) for s", "whole # pipeline. for _ in data: pass end_time = time.time() log(\"====> Timing", "return gio.step3c_input() def run_step4(data): from steps import step4 # Unlike earlier steps, Step", "iterators, either produced from the previous step, or an iterator # that feeds", "data = gio.step1_input() pre = estep1.pre_step1(data) result = step1.step1(pre) post = estep1.post_step1(result) return", "\"\"\"Take a parameter string from the command line and update the parameters module.\"\"\"", "a list of strings.\"\"\" steps = steps.strip() if not steps: return [str(x) for", "is 0 or 1 and the default case will correctly # coerce it.", "== 1: logit = \"STEP %s\" % step_list[0] else: assert len(step_list) >= 2", "steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from parameters/*.py during run\") parser.add_option(\"--no-work_files\",", "cannot: raise Fatal(\"Can't run steps %s\" % str(cannot)) # Create a message for", "log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\" % (end_time - start_time)) return", "produces a data object, its output. Ordinarily the data objects # are iterators,", "usage = \"usage: %prog [options]\" parser = optparse.OptionParser(usage) parser.add_option(\"-s\", \"--steps\", action=\"store\", metavar=\"S[,S]\", default=\"\",", "steps import step2 if data is None: data = gio.step2_input() result = step2.step2(data)", "before calling other modules that # use \"print\" to generate their output. logfile", "open(PROGRESS_DIR + 'progress.txt', 'a') progress.write(msg + '\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the", "and all intermediate-level directories needed to contain it, unless it already exists.\"\"\" if", "works nicely for strings, ints, and floats... x = getattr(parameters, key) # ...", "takes a land mask as optional input, this is all handled in #", "type as the existing parameter # value. 
That works nicely for strings, ints,", "not os.path.isdir(path): log(\"... creating directory %s\" % path) os.makedirs(path) # Each of the", "# Record start time now, and ending times for each step. start_time =", "update the parameters module.\"\"\" if not parm: return import parameters for p in", "options, args = parse_options(argv[1:]) update_parameters(options.parameter) step_list = list(options.steps) # overwrite progress popup if", "'\\n\\n') progress.flush() def mkdir(path): \"\"\"mkdir(PATH): create the directory PATH, and all intermediate-level directories", "run_step1, '2': run_step2, '3': run_step3, '3c': run_step3c, '4': run_step4, '5': run_step5, } #", "-- run steps of the GISTEMP algorithm # # <NAME>, 2009-12-08 # <NAME>,", "we'll be changing the value of sys.stdout before calling other modules that #", "# overwrite progress popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\",", "up parameters...\\n\\n\") # Create all the temporary directories we're going to use. for", "the data objects # are iterators, either produced from the previous step, or", "= parse_steps(options.steps) return options, args def update_parameters(parm): \"\"\"Take a parameter string from the", "in # the step5_input() function. data = gio.step5_input(data) result = step5.step5(data) return gio.step5_output(result)", "run in the order you specify. If this option is omitted, run all", "result = step1.step1(pre) post = estep1.post_step1(result) return gio.step1_output(post) def run_step2(data): from steps import", "step5 # Step 5 takes a land mask as optional input, this is", "step was, in order to # write its output, and hence suck data", "now, and ending times for each step. start_time = time.time() cannot = [s", "the form '1-3'. 
try: l, r = part.split('-') result.update(str(s) for s in range(int(l),", "whatever the last step was, in order to # write its output, and", "start_time = time.time() cannot = [s for s in step_list if s not", "logit) data = None for step in step_list: data = step_fn[step](data) # Consume", "default=\"\", help=\"Select range of steps to run\") parser.add_option('-p', '--parameter', action='append', help=\"Redefine parameter from", "except ValueError: # Expect to catch both # \"ValueError: too many values to", "from settings import * # Clear Climate Code import gio class Fatal(Exception): pass", "in whatever the last step was, in order to # write its output,", "for bool. if type(x) == bool: try: value = ['false', 'true'].index(value.lower()) except ValueError:", "steps to run, as a comma-separated list of numbers from 0 to 5.", "def run_step3c(data): \"\"\"An alternative to Step 3 that reads (copies) the output file", "gio.step0_output(result) def run_step1(data): from steps import step1 from extension import step1 as estep1", "directories we're going to use. for d in ['log', 'result', 'work', \"input\"]: mkdir(TMP_DIR", "too many values (\"1-3-\"), and # \"ValueError: invalid literal for int() with base", "end_time = time.time() log(\"====> Timing Summary ====\") log(\"Run took %.1f seconds\" % (end_time", "= gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps import step5", "'w') progress.write(\"Setting up parameters...\\n\\n\") # Create all the temporary directories we're going to", "unpack\" when the split # produces too many values (\"1-3-\"), and # \"ValueError:", "popup if not os.path.exists(PROGRESS_DIR): os.makedirs(PROGRESS_DIR) progress = open(PROGRESS_DIR + \"progress.txt\", 'w') progress.write(\"Setting up", "Part can be integer number with an optional letter suffix... 
if re.match(r'^\\d+[a-z]?$', part):", "values (\"1-3-\"), and # \"ValueError: invalid literal for int() with base 10: 'a'\"", "% key) # Coerce value, a string, to the same type as the", "the temporary directories we're going to use. for d in ['log', 'result', 'work',", "are run in the order you specify. If this option is omitted, run", "else: assert len(step_list) >= 2 t = [str(s) for s in range(int(step_list[0]), int(step_list[-1])", "steps import step5 # Step 5 takes a land mask as optional input,", "up. data = gio.step4_input(data) result = step4.step4(data) return gio.step4_output(result) def run_step5(data): from steps", "in /tmp/input to re-download the input data files # otherwise the files in", "import step1 as estep1 if data is None: data = gio.step1_input() pre =", "= step5.step5(data) return gio.step5_output(result) def parse_steps(steps): \"\"\"Parse the -s, steps, option. Produces a", "sys.path.append(os.getcwd()) from settings import * # Clear Climate Code import gio class Fatal(Exception):", "'5': run_step5, } # Record start time now, and ending times for each", "earlier stages is land data and # is zipped up. data = gio.step4_input(data)", "# http://www.python.org/doc/2.4.4/lib/module-os.html import os # http://docs.python.org/release/2.4.4/lib/module-re.html import re # http://www.python.org/doc/2.4.4/lib/module-sys.html import sys try:", "== '(' and value[-1] == ')': value = value[1:-1] value = [int(x) for", "= type(x)(value) setattr(parameters, key, value) # Download input files def dl_input_files(): import fetch", "value[0] == '(' and value[-1] == ')': value = value[1:-1] value = [int(x)", "part in steps.split(','): # Part can be integer number with an optional letter", "calling other modules that # use \"print\" to generate their output. 
logfile =", "value.split(',')] value = type(x)(value) setattr(parameters, key, value) # Download input files def dl_input_files():", "log(\"====> %s ====\" % logit) data = None for step in step_list: data", "option. Produces a list of strings.\"\"\" steps = steps.strip() if not steps: return", "directory \" \"to %s and try again.\" % rootdir) sys.exit() except: sys.exit() sys.path.append(os.getcwd())" ]
[ "diff_species): \"\"\"Append all sites in a unit cell to a structure - must", "def visLattice(lattice): from pymatgen import Structure, Lattice import nglview as ngl unit_cell= Structure(lattice,", "enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec)", "Structure, Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell)", "the basis [0,0,0] f=f[1:] #Add a basis at each of the unit cell", "superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice", "def cubicCell(cell, a3): \"\"\"Append all sites in a unit cell to a structure\"\"\"", "range(2) for j in range(2) for k in range(2)] #Remove the lattice point", "cubicCell(cell, a3): \"\"\"Append all sites in a unit cell to a structure\"\"\" from", "- must have cubic lattice\"\"\" #diff_species: Boolean, if true, make different species diff", "#Add a basis at each of the unit cell lattice points if diff_species:", "all coordinates exceed cubic cell [1,1,1] i = -1 j = -1 k", "in new_coord): #while all(x <=thr_a for x in new_coord): #while all(x <= thr_a", "cubic unit cell (i.e all the corners) f=[[i,j,k] for i in range(2) for", "all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for", "visLattice(lattice): from pymatgen import Structure, Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'],", "If false, make one basis group one color. #Get a copy of the", "lattice\"\"\" #diff_species: Boolean, if true, make different species diff colors. 
If false, make", "i in range(2) for j in range(2) for k in range(2)] #Remove the", "ngl selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6", "of the unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in", "a3): from pymatgen import Structure, Lattice import nglview as ngl selec=[] for ind,", "all sites in a unit cell to a structure - must have cubic", "thr_a for x in new_coord): for i in range(-3, 3): for j in", "a unit cell to a structure - must have cubic lattice\"\"\" #diff_species: Boolean,", "Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations()", "prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice vectors until all", "make_supercell(cell, diff_species): \"\"\"Append all sites in a unit cell to a structure -", "basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of", "import numpy as np import nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec", "in basis] #k +=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord =", "basis basis=cell.copy() superCell = cell.copy() #Create a list of all the lattice points", "ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC,", "pymatgen import Structure, Lattice import nglview as ngl selec=[] for ind, site in", "import Structure, Lattice import numpy as np import nglview as ngl basis=cell.copy() superCell", "atom.coords + new_coord, coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord) #print(i, j,", 
"new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom", "pymatgen import Structure, Lattice import numpy as np import nglview as ngl basis=cell.copy()", "basis] #k +=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i", "#j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen", "to a structure\"\"\" from pymatgen import Structure, Lattice import numpy as np import", "a unit cell to a structure\"\"\" from pymatgen import Structure, Lattice import numpy", "[1,1,1] i = -1 j = -1 k = -1 #Since not perfect", "= prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice", "the unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis", "cubic lattice\"\"\" #diff_species: Boolean, if true, make different species diff colors. If false,", "for k in range(2)] #Remove the lattice point associated with the basis [0,0,0]", "cubic cell [1,1,1] i = -1 j = -1 k = -1 #Since", "[superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord) #print(i,", "for i in range(2) for j in range(2) for k in range(2)] #Remove", "from pymatgen import Structure, Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]],", "each of the unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom", "make different species diff colors. 
If false, make one basis group one color.", "superCell = cell.copy() #Create a list of all the lattice points in the", "unit cell to a structure\"\"\" from pymatgen import Structure, Lattice import numpy as", "new_coord): #while all(x <= thr_a for x in new_coord): for i in range(-3,", "#k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i", "k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1", "range(2)] #Remove the lattice point associated with the basis [0,0,0] f=f[1:] #Add a", "if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick',", "to a structure - must have cubic lattice\"\"\" #diff_species: Boolean, if true, make", "structure defining the basis basis=cell.copy() superCell = cell.copy() #Create a list of all", "diff colors. If false, make one basis group one color. 
#Get a copy", "selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for i", "the lattice point associated with the basis [0,0,0] f=f[1:] #Add a basis at", "+ prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def", "-1 k = -1 #Since not perfect thr_a = a3*1.15 coord_base = [0,0,0]", "import nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms", "j in range(2) for k in range(2)] #Remove the lattice point associated with", "#j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord", "copy of the structure defining the basis basis=cell.copy() superCell = cell.copy() #Create a", "sites in a unit cell to a structure - must have cubic lattice\"\"\"", "structure\"\"\" from pymatgen import Structure, Lattice import numpy as np import nglview as", "coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import Structure, Lattice", "new_coord): #while all(x <=thr_a for x in new_coord): #while all(x <= thr_a for", "Structure, Lattice import numpy as np import nglview as ngl basis=cell.copy() superCell =", "for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom", "prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k", "combinations of lattice vectors until all coordinates exceed cubic cell [1,1,1] i =", "= -1 j = -1 k = -1 #Since not perfect thr_a =", "view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import Structure, 
Lattice import", "= [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for x in new_coord): #while", "false, make one basis group one color. #Get a copy of the structure", "<reponame>KCMak653/MSE430Notebooks<filename>MSE430Funcs/CrysStrucFuncs.py<gh_stars>0 def make_supercell(cell, diff_species): \"\"\"Append all sites in a unit cell to a", "new_coord, coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord) #print(i, j, k) #j", "in range(-3, 3): for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k", "f=[[i,j,k] for i in range(2) for j in range(2) for k in range(2)]", "+=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j +", "if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in range(len(f))] else:", "site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in", "basis group one color. #Get a copy of the structure defining the basis", "prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice):", "3): for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords", "= -1 #Since not perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord =", "import Structure, Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False)", "& all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i])", "not perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x", "color. 
#Get a copy of the structure defining the basis basis=cell.copy() superCell =", "a3): \"\"\"Append all sites in a unit cell to a structure\"\"\" from pymatgen", "atom.frac_coords+f[site]) for atom in basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for", "j in range(-3, 3): for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j +", "#new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j", "in range(2) for k in range(2)] #Remove the lattice point associated with the", "(i.e all the corners) f=[[i,j,k] for i in range(2) for j in range(2)", "all(x <= thr_a for x in new_coord): for i in range(-3, 3): for", "= a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for x", "#while all(x <=thr_a for x in new_coord): #while all(x <= thr_a for x", "the structure defining the basis basis=cell.copy() superCell = cell.copy() #Create a list of", "a list of all the lattice points in the cubic unit cell (i.e", "#k +=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j", "in new_coord): for i in range(-3, 3): for j in range(-3, 3): for", "Lattice import nglview as ngl selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15)", "as ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different", "the basis basis=cell.copy() superCell = cell.copy() #Create a list of all the lattice", "for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords +", "have cubic lattice\"\"\" #diff_species: Boolean, if true, make different species diff colors. 
If", "= prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in", "for atom in basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site", "#i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from", "Boolean, if true, make different species diff colors. If false, make one basis", "3): for j in range(-3, 3): for k in range(-3,3): new_coord = prim_vec[0]*i", "points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in range(len(f))]", "for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC)", "basis [0,0,0] f=f[1:] #Add a basis at each of the unit cell lattice", "one color. #Get a copy of the structure defining the basis basis=cell.copy() superCell", "range(-3, 3): for j in range(-3, 3): for k in range(-3,3): new_coord =", "of lattice vectors until all coordinates exceed cubic cell [1,1,1] i = -1", "of the structure defining the basis basis=cell.copy() superCell = cell.copy() #Create a list", "<= thr_a for x in new_coord): for i in range(-3, 3): for j", "all the lattice points in the cubic unit cell (i.e all the corners)", "all(x <=thr_a for x in new_coord): #while all(x <= thr_a for x in", "= cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice vectors", "perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a", "[0,0,0] f=f[1:] #Add a basis at each of the unit cell lattice points", "in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True)", "for i in range(-3, 3): for j in range(-3, 3): for k in", "must have cubic 
lattice\"\"\" #diff_species: Boolean, if true, make different species diff colors.", "a copy of the structure defining the basis basis=cell.copy() superCell = cell.copy() #Create", "a structure - must have cubic lattice\"\"\" #diff_species: Boolean, if true, make different", "[superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis] return(superCell) def cubicCell(cell,", "k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord,", "true, make different species diff colors. If false, make one basis group one", "pymatgen import Structure, Lattice import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True,", "all(x <=thr_a for x in new_coord): #while all(x <=thr_a for x in new_coord):", "for x in new_coord): #while all(x <= thr_a for x in new_coord): for", "+ prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in basis] #k +=1", "unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for", "if true, make different species diff colors. 
If false, make one basis group", "<=thr_a for x in new_coord): #while all(x <=thr_a for x in new_coord): #while", "site in range(len(f)) for atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all", "#print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k", "in range(-3, 3): for j in range(-3, 3): for k in range(-3,3): new_coord", "sites in a unit cell to a structure\"\"\" from pymatgen import Structure, Lattice", "i in range(-3, 3): for j in range(-3, 3): for k in range(-3,3):", "range(-3, 3): for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species,", "nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with", "atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in a unit", "[0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for x in new_coord): #while all(x", "until all coordinates exceed cubic cell [1,1,1] i = -1 j = -1", "def visUC(SC, a3): from pymatgen import Structure, Lattice import nglview as ngl selec=[]", "with different combinations of lattice vectors until all coordinates exceed cubic cell [1,1,1]", "#Get a copy of the structure defining the basis basis=cell.copy() superCell = cell.copy()", "as np import nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix'))", "prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in basis]", "new_coord): for i in range(-3, 3): for j in range(-3, 3): for k", "all the corners) f=[[i,j,k] for i in range(2) for j in range(2) for", "Structure, Lattice import nglview as ngl selec=[] for ind, site in enumerate(SC.sites): if", "in range(len(f)) for atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites", "cell to a 
structure\"\"\" from pymatgen import Structure, Lattice import numpy as np", "x in new_coord): #while all(x <= thr_a for x in new_coord): for i", "range(len(f)) for atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in", "Lattice import numpy as np import nglview as ngl basis=cell.copy() superCell = cell.copy()", "in new_coord): #while all(x <= thr_a for x in new_coord): for i in", "-1 #Since not perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy()", "#while all(x <=thr_a for x in new_coord): #while all(x <=thr_a for x in", "basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in a unit cell to", "atom in basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in", "in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis]", "view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import Structure, Lattice import nglview", "nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6)", "associated with the basis [0,0,0] f=f[1:] #Add a basis at each of the", "cell [1,1,1] i = -1 j = -1 k = -1 #Since not", "prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice import nglview as ngl", "k = -1 #Since not perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord", "for site in range(len(f)) for atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append", "[superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site])", "+ prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice import nglview as", 
"range(2) for k in range(2)] #Remove the lattice point associated with the basis", "points in the cubic unit cell (i.e all the corners) f=[[i,j,k] for i", "unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3):", "#new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure,", "in basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f))", "in the cubic unit cell (i.e all the corners) f=[[i,j,k] for i in", "#k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import", "basis for site in range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for", "np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice vectors until all coordinates exceed", "-1 j = -1 k = -1 #Since not perfect thr_a = a3*1.15", "new_coord = coord_base.copy() #while all(x <=thr_a for x in new_coord): #while all(x <=thr_a", "the cubic unit cell (i.e all the corners) f=[[i,j,k] for i in range(2)", "from pymatgen import Structure, Lattice import nglview as ngl selec=[] for ind, site", "#while all(x <= thr_a for x in new_coord): for i in range(-3, 3):", "for j in range(2) for k in range(2)] #Remove the lattice point associated", "colors. If false, make one basis group one color. 
#Get a copy of", "np import nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append", "from pymatgen import Structure, Lattice import numpy as np import nglview as ngl", "corners) f=[[i,j,k] for i in range(2) for j in range(2) for k in", "range(len(f))] else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis] return(superCell)", "group one color. #Get a copy of the structure defining the basis basis=cell.copy()", "prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord)", "vectors until all coordinates exceed cubic cell [1,1,1] i = -1 j =", "coord_base.copy() #while all(x <=thr_a for x in new_coord): #while all(x <=thr_a for x", "+ new_coord, coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord) #print(i, j, k)", "lattice points in the cubic unit cell (i.e all the corners) f=[[i,j,k] for", "cell to a structure - must have cubic lattice\"\"\" #diff_species: Boolean, if true,", "unit cell (i.e all the corners) f=[[i,j,k] for i in range(2) for j", "for x in new_coord): for i in range(-3, 3): for j in range(-3,", "ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations()", "list of all the lattice points in the cubic unit cell (i.e all", "visUC(SC, a3): from pymatgen import Structure, Lattice import nglview as ngl selec=[] for", "return(view6) def visUC(SC, a3): from pymatgen import Structure, Lattice import nglview as ngl", "return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in a unit cell to a", "in a unit cell to a structure\"\"\" from pymatgen import Structure, Lattice import", "ngl basis=cell.copy() superCell = cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different 
combinations", "\"\"\"Append all sites in a unit cell to a structure - must have", "atoms with different combinations of lattice vectors until all coordinates exceed cubic cell", "['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen", "diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in range(len(f))] else: [superCell.append(atom.specie,", "in range(2)] #Remove the lattice point associated with the basis [0,0,0] f=f[1:] #Add", "j = -1 k = -1 #Since not perfect thr_a = a3*1.15 coord_base", "#Since not perfect thr_a = a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while", "range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for", "lattice vectors until all coordinates exceed cubic cell [1,1,1] i = -1 j", "defining the basis basis=cell.copy() superCell = cell.copy() #Create a list of all the", "basis=cell.copy() superCell = cell.copy() #Create a list of all the lattice points in", "for atom in basis] #k +=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1", "nglview as ngl selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15):", "at each of the unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for", "species diff colors. If false, make one basis group one color. #Get a", "for atom in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in a", "cell (i.e all the corners) f=[[i,j,k] for i in range(2) for j in", "one basis group one color. #Get a copy of the structure defining the", "def make_supercell(cell, diff_species): \"\"\"Append all sites in a unit cell to a structure", "make one basis group one color. 
#Get a copy of the structure defining", "return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice import nglview as ngl unit_cell=", "thr_a = a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for", "lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site in", "+prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell)", "+prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice import nglview", "#print(i, j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1", "a3*1.15 coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for x in", "unit cell to a structure - must have cubic lattice\"\"\" #diff_species: Boolean, if", "<=thr_a for x in new_coord): #while all(x <= thr_a for x in new_coord):", "a basis at each of the unit cell lattice points if diff_species: [superCell.append(atom.species,", "basis at each of the unit cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site])", "= cell.copy() #Create a list of all the lattice points in the cubic", "= prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j +", "#diff_species: Boolean, if true, make different species diff colors. 
If false, make one", "of all the lattice points in the cubic unit cell (i.e all the", "x in new_coord): for i in range(-3, 3): for j in range(-3, 3):", "for x in new_coord): #while all(x <=thr_a for x in new_coord): #while all(x", "else: [superCell.append(atom.specie, atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis] return(superCell) def", "+prim_vec[1]*j + prim_vec[2]*k [superCell.append(atom.species, atom.coords + new_coord, coords_are_cartesian=True) for atom in basis] #k", "with the basis [0,0,0] f=f[1:] #Add a basis at each of the unit", "atom in basis] #k +=1 #print(new_coord) #print(i, j, k) #j +=1 #k=-1 #new_coord", "import Structure, Lattice import nglview as ngl selec=[] for ind, site in enumerate(SC.sites):", "= -1 k = -1 #Since not perfect thr_a = a3*1.15 coord_base =", "as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def", "#Append atoms with different combinations of lattice vectors until all coordinates exceed cubic", "\"\"\"Append all sites in a unit cell to a structure\"\"\" from pymatgen import", "point associated with the basis [0,0,0] f=f[1:] #Add a basis at each of", "view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for i in", "in basis] return(superCell) def cubicCell(cell, a3): \"\"\"Append all sites in a unit cell", "cell lattice points if diff_species: [superCell.append(atom.species, atom.frac_coords+f[site]) for atom in basis for site", "atom.frac_coords+f[site]) for site in range(len(f)) for atom in basis] return(superCell) def cubicCell(cell, a3):", "as ngl selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind]", "i = -1 j = -1 k = -1 #Since not 
perfect thr_a", "view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import Structure, Lattice import nglview as", "= ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for i in selec]", "f=f[1:] #Add a basis at each of the unit cell lattice points if", "import nglview as ngl selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) &", "= coord_base.copy() #while all(x <=thr_a for x in new_coord): #while all(x <=thr_a for", "k in range(2)] #Remove the lattice point associated with the basis [0,0,0] f=f[1:]", "= np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice vectors until all coordinates", "in a unit cell to a structure - must have cubic lattice\"\"\" #diff_species:", "[[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import", "the corners) f=[[i,j,k] for i in range(2) for j in range(2) for k", "selec=[] for ind, site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 =", "for j in range(-3, 3): for k in range(-3,3): new_coord = prim_vec[0]*i +prim_vec[1]*j", "exceed cubic cell [1,1,1] i = -1 j = -1 k = -1", "prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k return(superCell) def visLattice(lattice): from pymatgen import Structure, Lattice import", "coords_are_cartesian=True) for atom in basis] #k +=1 #print(new_coord) #print(i, j, k) #j +=1", "lattice point associated with the basis [0,0,0] f=f[1:] #Add a basis at each", "the lattice points in the cubic unit cell (i.e all the corners) f=[[i,j,k]", "#Remove the lattice point associated with the basis [0,0,0] f=f[1:] #Add a basis", "to_unit_cell=True, coords_are_cartesian=False) 
view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from pymatgen import Structure,", "structure - must have cubic lattice\"\"\" #diff_species: Boolean, if true, make different species", "a structure\"\"\" from pymatgen import Structure, Lattice import numpy as np import nglview", "different combinations of lattice vectors until all coordinates exceed cubic cell [1,1,1] i", "x in new_coord): #while all(x <=thr_a for x in new_coord): #while all(x <=", "numpy as np import nglview as ngl basis=cell.copy() superCell = cell.copy() prim_vec =", "in range(2) for j in range(2) for k in range(2)] #Remove the lattice", "site in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick',", "in enumerate(SC.sites): if all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10,", "#Create a list of all the lattice points in the cubic unit cell", "coordinates exceed cubic cell [1,1,1] i = -1 j = -1 k =", "import nglview as ngl unit_cell= Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell()", "cell.copy() prim_vec = np.asarray(cell.lattice.as_dict().get('matrix')) #Append atoms with different combinations of lattice vectors until", "Structure(lattice, ['Cl'], [[0,0,0]], to_unit_cell=True, coords_are_cartesian=False) view6=ngl.show_pymatgen(unit_cell) view6.clear_representations() view6.add_unitcell() return(view6) def visUC(SC, a3): from", "+=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1 #k=-1 #new_coord =", "all sites in a unit cell to a structure\"\"\" from pymatgen import Structure,", 
"all(site.coords>=-a3*.15) & all(site.coords<=a3*1.15): selec=selec+[ind] view6 = ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5,", "different species diff colors. If false, make one basis group one color. #Get", "coord_base = [0,0,0] new_coord = coord_base.copy() #while all(x <=thr_a for x in new_coord):", "ngl.show_pymatgen(SC) view6.clear_representations() #view6.add_representation('ball+stick', aspectRatio=10, selection=selec) [view6.add_representation('ball+stick', aspectRatio=5, selection=[i]) for i in selec] return(view6)", "j, k) #j +=1 #k=-1 #new_coord = prim_vec[0]*i +prim_vec[1]*j + prim_vec[2]*k #i+=1 #j=-1", "cell.copy() #Create a list of all the lattice points in the cubic unit" ]
[ "for i in range(count): item = layout.itemAt(i).widget() if item == target: return layout,i", ": dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() :", "src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style()", "Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ----------", "------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not", "dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole())", "else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return", "layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return", "查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for i in range(count): item =", ": dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if", "for _ in range(indent)]) + prefix for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log)", ":param indent: indentation space, defaults to \"\" :type indent: str, optional :param log:", "dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() : 
dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry())", "if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def", "= getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def", ": dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() :", "layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] # NOTE traverseChildren ----------------------------------------------------------------------------", "src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale()", "dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState())", ": dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() :", "dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件 dst :", ": QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号 \"\"\" count =", "src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText()", "__email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import", "if 
src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if", "u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 \"\"\"", ": dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() :", "替换组件 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 Returns -------", "for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if callable(childCallback) : childCallback(child,traverse_func) else: traverse_func()", "dst : QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout()", "目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() :", "的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState", "dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy())", "return prefix = \"\".join([\" \" for _ in range(indent)]) + prefix for child", "'2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui from Qt import QtCore", "Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns", "return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def 
traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget", ": dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() :", "(prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _ in range(indent)])", "= layout.itemAt(i).widget() if item == target: return layout,i else: for child in layout.children():", "通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的", "() : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if", "\"\"\"traverseChildren Traverse into the widget children | print the children hierarchy :param parent:", "\"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到", "QWidget 源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription()", "dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy())", ": dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() :", "src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ())", ": dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() :", "src.minimumSize() : 
dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette())", "if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if", "src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection()", "序号 Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件", "dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize())", "= 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from", "dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask())", ": dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() :", "def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件 dst : QWidget", "dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor())", "if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout", "dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if 
src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity())", "Returns ------- layout : QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号", ": dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() :", "child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] #", "\"\".join([\" \" for _ in range(indent)]) + prefix for child in parent.children(): traverse_func", "i : int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for i in", ": dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() :", ": QWidget 源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if", "\"\"\" from Qt import QtGui from Qt import QtCore from Qt import QtWidgets", ": dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() :", "print the children hierarchy :param parent: traverse widget :type parent: QWidget :param indent:", ": dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() :", "optional :param log: print the data, defaults to False :type log: bool, optional", "dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() :", "------- layout : QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号 \"\"\"", "if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : 
dst.setUpdatesEnabled(src.updatesEnabled()) if", "to \"\" :type indent: str, optional :param log: print the data, defaults to", "查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for", "hierarchy :param parent: traverse widget :type parent: QWidget :param indent: indentation space, defaults", "layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的", ": dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex", "src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font()", ": dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() :", "int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for i in range(count): item", "(u\"没有找到 %s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def", ": QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout i : int", "dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText())", ": dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement()", "the data, defaults to False :type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent)", "print (u\"没有找到 %s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return dst", "item = 
layout.itemAt(i).widget() if item == target: return layout,i else: for child in", ": dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() :", "src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis()", "'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt", "Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if", "if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if", "dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette()", "from __future__ import print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12", "layout.count() for i in range(count): item = layout.itemAt(i).widget() if item == target: return", "源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() :", "src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip()", "dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize())", "dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : 
dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font())", "str, optional :param log: print the data, defaults to False :type log: bool,", "dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality())", "= \"\".join([\" \" for _ in range(indent)]) + prefix for child in parent.children():", "prefix = \"\".join([\" \" for _ in range(indent)]) + prefix for child in", "dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale())", "if layout: return layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren", "if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if", "in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] # NOTE", "import QtCore from Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget", "traverse widget :type parent: QWidget :param indent: indentation space, defaults to \"\" :type", "dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints())", "src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if 
src.parent() : dst.setParent(src.parent())", "src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality()", "if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if", "if item == target: return layout,i else: for child in layout.children(): layout,i =", "src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip()", "u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 Returns", "if src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if", ":type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if", "dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () :", "return layout,i else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return", "---------- src : QWidget 源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops() :", ": dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() :", "获取目标 Layout 和 序号 Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件 target", "src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和", "dst.setContentsMargins(src.contentsMargins()) if 
src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy())", "if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if", "= layout.count() for i in range(count): item = layout.itemAt(i).widget() if item == target:", "递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout i", "src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy()", "if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if", "Traverse into the widget children | print the children hierarchy :param parent: traverse", "if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if", "if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if", "log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not", "i in range(count): item = layout.itemAt(i).widget() if item == target: return layout,i else:", "u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件", "src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole()", ": 
dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() :", "optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return", "---------- src : QWidget 源组件 dst : QWidget 目标组件 Returns ------- QWidget [description]", "indent: indentation space, defaults to \"\" :type indent: str, optional :param log: print", ": dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() :", "layout,i else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i", "Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态", "children hierarchy :param parent: traverse widget :type parent: QWidget :param indent: indentation space,", "if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if", "src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target):", "src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath()", "range(indent)]) + prefix for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if callable(childCallback) :", "src.setParent(None) return dst def updateWidgetState(src,dst): 
u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件", "dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip())", "dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis())", "()) if src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement())", "\"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole())", "dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole())", ": dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() :", "| print the children hierarchy :param parent: traverse widget :type parent: QWidget :param", "src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的 Layout,替换失败\" %", ": dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() :", "src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon()", "src : QWidget 源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops() : 
dst.setAcceptDrops(src.acceptDrops())", "children | print the children hierarchy :param parent: traverse widget :type parent: QWidget", "dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags())", "目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src)", "QWidget :param indent: indentation space, defaults to \"\" :type indent: str, optional :param", "dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy())", "layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src)", "QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout : QLayout", "updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s", "src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled()", "in range(count): item = layout.itemAt(i).widget() if item == target: return layout,i else: for", ": dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() :", "---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children | print the children", "if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : 
dst.setFont(src.font()) if", "return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件 dst", "\"\"\" count = layout.count() for i in range(count): item = layout.itemAt(i).widget() if item", "defaults to \"\" :type indent: str, optional :param log: print the data, defaults", "getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False):", ":param log: print the data, defaults to False :type log: bool, optional \"\"\"", "同步组件状态 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 \"\"\" if", "if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if", "return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src", "dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters", ": QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout :", "---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件 dst :", "parent: traverse widget :type parent: QWidget :param indent: indentation space, defaults to \"\"", "# NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget", "src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : 
dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout()", "[description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print", "Layout 和 序号 Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件 target :", ": QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index", "not layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None)", "QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src :", "if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if", "prefix for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if callable(childCallback) : childCallback(child,traverse_func) else:", ": dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() :", ": dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号", ":type parent: QWidget :param indent: indentation space, defaults to \"\" :type indent: str,", ":param parent: traverse widget :type parent: QWidget :param indent: indentation space, defaults to", "in range(indent)]) + prefix for child in parent.children(): traverse_func = 
lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if callable(childCallback)", "= getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src) return", "if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if", "dst : QWidget 目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription())", "调用库 \"\"\" from Qt import QtGui from Qt import QtCore from Qt import", "False :type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent)", "src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking", "getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src) return src", "if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ----------", "from Qt import QtGui from Qt import QtCore from Qt import QtWidgets #", "%s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst):", "if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix =", "20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui from Qt import QtCore from", "def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout : QLayout 通过", ":type indent: str, optional :param log: print the data, defaults to False :type", "__author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 
\"\"\"", "if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if", "\" for _ in range(indent)]) + prefix for child in parent.children(): traverse_func =", "if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if", "src : QWidget 源组件 dst : QWidget 目标组件 Returns ------- QWidget [description] \"\"\"", "dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath())", "QtGui from Qt import QtCore from Qt import QtWidgets # NOTE replaceWidget ----------------------------------------------------------------------------", "src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask()", "__date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui from Qt", "log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _", ": dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking ()", ": dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if", "layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None] # NOTE traverseChildren", "的序号 \"\"\" count = layout.count() for i in range(count): item = layout.itemAt(i).widget() if", ": dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : 
dst.setMaximumSize(src.maximumSize()) if src.minimumSize() :", "widget :type parent: QWidget :param indent: indentation space, defaults to \"\" :type indent:", "log: print the data, defaults to False :type log: bool, optional \"\"\" if", "item == target: return layout,i else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target)", "= src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout: print (u\"没有找到 %s 的 Layout,替换失败\"", "# NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children |", "if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if src.parent() :", "printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \"", "dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize())", "if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if", "dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标", "== target: return layout,i else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if", ": dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if 
src.geometry() :", "if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if", "callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\"", "QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count()", "QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index = getTargetLayoutIndex(layout,src) if not layout:", ": QWidget 目标组件 \"\"\" if src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if", "hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _ in range(indent)]) + prefix for", "src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize()", "to False :type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print", "elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for", "'<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui from", "src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole() : dst.setWindowRole(src.windowRole()) if src.windowState()", "replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件", "if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _ in range(indent)]) +", "from Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters", "QWidget 目标组件 \"\"\" if 
src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole()", "layout: return layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse", ": dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() :", "traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children | print the children hierarchy :param", "\"\"\" 调用库 \"\"\" from Qt import QtGui from Qt import QtCore from Qt", "Layout 的序号 \"\"\" count = layout.count() for i in range(count): item = layout.itemAt(i).widget()", "% src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters", "dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style())", "NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children | print", "print (prefix,parent) if not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _ in", "if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if", "if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) 
if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking", "NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件", "layout.itemAt(i).widget() if item == target: return layout,i else: for child in layout.children(): layout,i", "import QtGui from Qt import QtCore from Qt import QtWidgets # NOTE replaceWidget", "dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout : QLayout", "和 序号 Parameters ---------- layout : QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget", "Layout i : int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for i", "要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout", "if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if", "getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout : QLayout 通过 QLayout", "dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() : dst.setPalette(src.palette()) if src.parent()", "src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src :", "# coding:utf-8 from __future__ import print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__", "__future__ import print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01'", "QtCore from Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件", 
"dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip())", "= '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui", "import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src", "range(count): item = layout.itemAt(i).widget() if item == target: return layout,i else: for child", "indent: str, optional :param log: print the data, defaults to False :type log:", "if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if", "src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity() : dst.setWindowOpacity(src.windowOpacity()) if src.windowRole()", "layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the", "src) return src layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ----------", "src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() : dst.setMaximumSize(src.maximumSize()) if src.minimumSize()", "return layout,i return [None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into", "if 
src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if", "Qt import QtCore from Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst):", "dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins())", "dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() : dst.setWindowIcon(src.windowIcon())", "\"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"): return prefix", "def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件 dst : QWidget", ": dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() :", "target : QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout i :", "= '2019-12-12 20:09:01' \"\"\" 调用库 \"\"\" from Qt import QtGui from Qt import", "if not layout: print (u\"没有找到 %s 的 Layout,替换失败\" % src) return src layout.insertWidget(index,dst)", "Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 \"\"\" if src.acceptDrops()", ": dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() :", "into the widget children | print the children hierarchy :param parent: traverse widget", "if src.minimumSize() : dst.setMinimumSize(src.minimumSize()) if src.hasMouseTracking () : 
dst.setMouseTracking(src.hasMouseTracking ()) if src.palette() :", "count = layout.count() for i in range(count): item = layout.itemAt(i).widget() if item ==", "widget children | print the children hierarchy :param parent: traverse widget :type parent:", "src.focusProxy() : dst.setFocusProxy(src.focusProxy()) if src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry()", "parent: QWidget :param indent: indentation space, defaults to \"\" :type indent: str, optional", "Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件 Returns ------- QWidget", "updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget 源组件 dst : QWidget 目标组件", "src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor()", "QWidget 源组件 dst : QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout", "bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log: print (prefix,parent) if not hasattr(parent,\"children\"):", "src.acceptDrops() : dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize()", "if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if", "not hasattr(parent,\"children\"): return prefix = \"\".join([\" \" for _ in range(indent)]) + prefix", "src.contentsMargins() : dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy()", "def 
traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children | print the children hierarchy", "[None,None] # NOTE traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children", "src.windowIcon() : dst.setWindowIcon(src.windowIcon()) if src.windowIconText() : dst.setWindowIconText(src.windowIconText()) if src.windowModality() : dst.setWindowModality(src.windowModality()) if src.windowOpacity()", "traverseChildren ---------------------------------------------------------------------------- def traverseChildren(parent,childCallback=None,printCallback=None,indent=4,prefix=\"\",log=False): \"\"\"traverseChildren Traverse into the widget children | print the", "from Qt import QtCore from Qt import QtWidgets # NOTE replaceWidget ---------------------------------------------------------------------------- def", "if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if", "+ prefix for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if callable(childCallback) : childCallback(child,traverse_func)", "data, defaults to False :type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif", "print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\" 调用库", "src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins()", "\"\" :type indent: 
str, optional :param log: print the data, defaults to False", ": dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if src.windowIcon() :", ": dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout :", "dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection())", "if src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if", "replaceWidget ---------------------------------------------------------------------------- def replaceWidget(src,dst): u\"\"\"replaceWidget 替换组件 Parameters ---------- src : QWidget 源组件 dst", "the widget children | print the children hierarchy :param parent: traverse widget :type", "layout.insertWidget(index,dst) src.setParent(None) return dst def updateWidgetState(src,dst): u\"\"\"updateWidgetState 同步组件状态 Parameters ---------- src : QWidget", "the children hierarchy :param parent: traverse widget :type parent: QWidget :param indent: indentation", "if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags() : dst.setWindowFlags(src.windowFlags()) if", "indentation space, defaults to \"\" :type indent: str, optional :param log: print the", "源组件 dst : QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout =", "src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled()) if src.whatsThis() : dst.setWhatsThis(src.whatsThis()) if src.windowFilePath() : dst.setWindowFilePath(src.windowFilePath()) if src.windowFlags()", "---------- layout : QLayout 
通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns -------", "if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout()) if src.layoutDirection() : dst.setLayoutDirection(src.layoutDirection()) if", ": dst.setLayoutDirection(src.layoutDirection()) if src.locale() : dst.setLocale(src.locale()) if src.mask() : dst.setMask(src.mask()) if src.maximumSize() :", "QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout i : int 查询组件所在的", "layout : QLayout 查询组件所在的 Layout i : int 查询组件所在的 Layout 的序号 \"\"\" count", "src.palette() : dst.setPalette(src.palette()) if src.parent() : dst.setParent(src.parent()) if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy()", "QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout : QLayout 查询组件所在的 Layout", "target: return layout,i else: for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout:", "dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints() : dst.setInputMethodHints(src.inputMethodHints()) if src.layout() : dst.setLayout(src.layout())", "if src.sizeIncrement() : dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if", "import print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ = '2019-12-12 20:09:01' \"\"\"", "print the data, defaults to False :type log: bool, optional \"\"\" if callable(printCallback):", ": int 查询组件所在的 Layout 的序号 \"\"\" count = layout.count() for i in range(count):", "coding:utf-8 from __future__ import print_function __author__ = 'timmyliang' __email__ = '<EMAIL>' __date__ =", "src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if src.cursor() : dst.setCursor(src.cursor()) if src.focusPolicy() : dst.setFocusPolicy(src.focusPolicy()) if src.focusProxy()", ": 
dst.setAcceptDrops(src.acceptDrops()) if src.accessibleDescription() : dst.setAccessibleDescription(src.accessibleDescription()) if src.backgroundRole() : dst.setBackgroundRole(src.backgroundRole()) if src.baseSize() :", "QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst) layout = src.parent().layout() layout,index =", "src.windowState() : dst.setWindowState(src.windowState()) def getTargetLayoutIndex(layout,target): u\"\"\"getTargetLayoutIndex 获取目标 Layout 和 序号 Parameters ---------- layout", ": QWidget 源组件 dst : QWidget 目标组件 Returns ------- QWidget [description] \"\"\" updateWidgetState(src,dst)", "_ in range(indent)]) + prefix for child in parent.children(): traverse_func = lambda:traverseChildren(child,indent=indent,prefix=prefix,childCallback=childCallback,printCallback=printCallback,log=log) if", ": dst.setSizeIncrement(src.sizeIncrement()) if src.sizePolicy() : dst.setSizePolicy(src.sizePolicy()) if src.statusTip() : dst.setStatusTip(src.statusTip()) if src.style() :", "dst.setStatusTip(src.statusTip()) if src.style() : dst.setStyle(src.style()) if src.toolTip() : dst.setToolTip(src.toolTip()) if src.updatesEnabled() : dst.setUpdatesEnabled(src.updatesEnabled())", "space, defaults to \"\" :type indent: str, optional :param log: print the data,", "Qt import QtGui from Qt import QtCore from Qt import QtWidgets # NOTE", "for child in layout.children(): layout,i = getTargetLayoutIndex(child,target) if layout: return layout,i return [None,None]", "src.font() : dst.setFont(src.font()) if src.foregroundRole() : dst.setForegroundRole(src.foregroundRole()) if src.geometry() : dst.setGeometry(src.geometry()) if src.inputMethodHints()", "layout : QLayout 通过 QLayout 递归遍历下属的组件 target : QWidget 要查询的组件 Returns ------- layout", "defaults to False :type log: bool, optional \"\"\" if callable(printCallback): printCallback(prefix,parent) elif log:", "if src.baseSize() : dst.setBaseSize(src.baseSize()) if src.contentsMargins() : 
dst.setContentsMargins(src.contentsMargins()) if src.contextMenuPolicy() : dst.setContextMenuPolicy(src.contextMenuPolicy()) if" ]
[ "cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3])", "self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7, 3, 17]) if __name__", "cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(),", "cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 =", "cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24)", "23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2", "= CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def", "23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first)", "<filename>Heap/TestCircularDoublyLinkedList.py # encoding-utf-8 import unittest from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self):", "TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) 
self.assertEqual(cdll.get_items(), [17, 24,", "cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24)", "cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7, 3, 17]) if", "23, 7, 3, 24, 23, 7, 3, 17]) if __name__ == \"__main__\": unittest.main()", "CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3]) def test_delete_node(self):", "self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7)", "test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23)", "# encoding-utf-8 import unittest from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll", "[17, 24, 23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7)", "self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23)", "= CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3,", "= CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 
23, 7, 3]) def", "cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7,", "cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7,", "7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 =", "24, 23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3)", "cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3])", "CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2)", "cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23,", "class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17,", "CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24,", "cdll.delete_node(cdll.first) 
self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23)", "def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23,", "CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7)", "cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3]) def test_delete_node(self): cdll", "cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7, 3,", "cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17)", "cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23,", "cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1", "cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7)", "cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 
23, 7, 3]) def test_delete_node(self): cdll =", "import unittest from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17)", "= CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3)", "[17, 24, 23, 7, 3, 24, 23, 7, 3, 17]) if __name__ ==", "CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self):", "cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17)", "7, 3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(),", "CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(),", "3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17)", "from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23)", "def 
test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23,", "cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17,", "def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24)", "import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3)", "24, 23, 7, 3, 24, 23, 7, 3, 17]) if __name__ == \"__main__\":", "3]) def test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24,", "test_delete_node(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) cdll.delete_node(cdll.first) self.assertEqual(cdll.get_items(), [24, 23, 7,", "cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24, 23, 7, 3, 24, 23, 7, 3, 17])", "encoding-utf-8 import unittest from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll =", "unittest from CircularDoublyLinkedList import CircularDoublyLinkedList class TestCircularDoublyLinkedList(unittest.TestCase): def test_simple_list(self): cdll = 
CircularDoublyLinkedList(17) cdll.insert_new_node(24)", "cdll1.insert_new_node(7) cdll1.insert_new_node(3) cdll2 = CircularDoublyLinkedList(17) cdll2.insert_new_node(24) cdll2.insert_new_node(23) cdll2.insert_new_node(7) cdll2.insert_new_node(3) cdll1.merge_cdlls(cdll2) self.assertEqual(cdll1.get_items(), [17, 24,", "[24, 23, 7, 3]) def test_merge_cdlls(self): cdll1 = CircularDoublyLinkedList(17) cdll1.insert_new_node(24) cdll1.insert_new_node(23) cdll1.insert_new_node(7) cdll1.insert_new_node(3)", "test_simple_list(self): cdll = CircularDoublyLinkedList(17) cdll.insert_new_node(24) cdll.insert_new_node(23) cdll.insert_new_node(7) cdll.insert_new_node(3) self.assertEqual(cdll.get_items(), [17, 24, 23, 7," ]
[ "# renWin.Render() # prevent the tk window from showing up then start the", "= vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors # ren1 =", "from showing up then start the event loop threshold = 15 # ---", "points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars = vtk.vtkFloatArray() i = 0 while", "i = i + 1 scalars = vtk.vtkFloatArray() i = 0 while i", "# Create the RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin", "vtk.vtkMath() points = vtk.vtkPoints() i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i", "profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile)", "shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the", "< 50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars)", "= vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5)", "the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160)", "window from showing up then start the event loop threshold = 15 #", "renWin.Render() # prevent the tk window from showing up then start the event", "Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin)", "50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) 
profile.GetPointData().SetScalars(scalars) #", "Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren", "= 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile", "vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper()", "cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image # renWin.Render() # prevent the", "renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math = vtk.vtkMath()", "renderer, set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 =", "shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor()", "= vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some", "prevent the tk window from showing up then start the event loop threshold", "vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points", "shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) #", "vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() #", "the image # renWin.Render() # prevent the tk window from showing up then", "from vtk.test import Testing from vtk.util.misc import 
vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create", "# math = vtk.vtkMath() points = vtk.vtkPoints() i = 0 while i <", "and both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren =", "create some points # math = vtk.vtkMath() points = vtk.vtkPoints() i = 0", "iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math = vtk.vtkMath() points", "vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) #", "i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars = vtk.vtkFloatArray() i", "and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5)", "+ 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard =", "# prevent the tk window from showing up then start the event loop", "= vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map =", "ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image", "ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render", "Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer", "map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) 
block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer,", "i + 1 scalars = vtk.vtkFloatArray() i = 0 while i < 50:", "Create the RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin =", "python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT =", "triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1)", "while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars = vtk.vtkFloatArray()", "= 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars", "image # renWin.Render() # prevent the tk window from showing up then start", "from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and", "i = i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them", "i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1", "+ 1 scalars = vtk.vtkFloatArray() i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1))", "= vtk.vtkPoints() i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i", "renWin.Render() # render the image # renWin.Render() # prevent the tk window from", "some points # math = vtk.vtkMath() points = vtk.vtkPoints() i = 0 while", "Add the actors to the renderer, set the background and size # ren1.AddActor(block)", "VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors # ren1", "set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera()", "# Add the actors to the renderer, set the background and size #", "block = vtk.vtkActor() 
block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer, set", "# create some points # math = vtk.vtkMath() points = vtk.vtkPoints() i =", "< 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars = vtk.vtkFloatArray() i =", "render the image # renWin.Render() # prevent the tk window from showing up", "shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort())", "# shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block =", "vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math = vtk.vtkMath() points = vtk.vtkPoints()", "background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30)", "0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars =", "cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image # renWin.Render() # prevent", "tk window from showing up then start the event loop threshold = 15", "start the event loop threshold = 15 # --- end of script --", "then start the event loop threshold = 15 # --- end of script", "renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points #", "cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image #", "<reponame>esean/stl_voro_fill<gh_stars>1-10 #!/usr/bin/env python import vtk from 
vtk.test import Testing from vtk.util.misc import vtkGetDataRoot", "#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT", "i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard", "cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image # renWin.Render() # prevent the tk", "both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor()", "import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot()", "= vtk.vtkFloatArray() i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i", ".1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0)", "vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to the", "i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points)", "map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors", "the actors to the renderer, set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1)", "block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer, set the background and size", "renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the", "scalars = vtk.vtkFloatArray() i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i =", "= 
ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image # renWin.Render()", "# shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update()", "size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange()", "vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors #", "1 scalars = vtk.vtkFloatArray() i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i", "= vtk.vtkMath() points = vtk.vtkPoints() i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1))", "1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod()", "vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and both", "# triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1", "# ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) #", "ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create", "= i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them #", "them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) 
shepard.SetSampleDimensions(20,20,20)", "actors to the renderer, set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400)", "vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the", "showing up then start the event loop threshold = 15 # --- end", "points # math = vtk.vtkMath() points = vtk.vtkPoints() i = 0 while i", "i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1", "SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map)", "RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1)", "math = vtk.vtkMath() points = vtk.vtkPoints() i = 0 while i < 50:", "# render the image # renWin.Render() # prevent the tk window from showing", "vtk.vtkFloatArray() i = 0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i +", "import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow,", "the RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow()", "0 while i < 50: scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile =", "50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i + 1 scalars = vtk.vtkFloatArray() i = 0", "shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block", "shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance .1 shepard.SetNullValue(1) shepard.SetSampleDimensions(20,20,20) shepard.Update() map", "while i < 50: 
scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile = vtk.vtkPolyData()", "vtk.vtkPoints() i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i = i +", "profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard SetMaximumDistance", "= vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer, set the", "# ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render()", "points = vtk.vtkPoints() i = 0 while i < 50: points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1)) i =", "profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate them # shepard = vtk.vtkShepardMethod() shepard.SetInputData(profile) shepard.SetModelBounds(0,1,0,1,.1,.5) # shepard", "to the renderer, set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera()", "scalars.InsertValue(i,math.Random(0,1)) i = i + 1 profile = vtk.vtkPolyData() profile.SetPoints(points) profile.GetPointData().SetScalars(scalars) # triangulate", "the renderer, set the background and size # ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1", "vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math =", "vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer, set the background", "up then start the event loop threshold = 15 # --- end of", "ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() # render the image # 
renWin.Render() #", "= vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math = vtk.vtkMath() points =", "ren1.AddActor(block) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.ResetCamera() cam1 = ren1.GetActiveCamera() cam1.Azimuth(160) cam1.Elevation(30) cam1.Zoom(1.5) ren1.ResetCameraClippingRange() renWin.Render() #", "= vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to", "= i + 1 scalars = vtk.vtkFloatArray() i = 0 while i <", "shepard.SetSampleDimensions(20,20,20) shepard.Update() map = vtk.vtkDataSetMapper() map.SetInputConnection(shepard.GetOutputPort()) block = vtk.vtkActor() block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add", "the event loop threshold = 15 # --- end of script -- #iren.Start()", "= vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create some points # math", "import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors", "iren.SetRenderWindow(renWin) # create some points # math = vtk.vtkMath() points = vtk.vtkPoints() i", "the tk window from showing up then start the event loop threshold =", "vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer()", "block.SetMapper(map) block.GetProperty().SetColor(1,0,0) # Add the actors to the renderer, set the background and", "ren1.ResetCameraClippingRange() renWin.Render() # render the image # renWin.Render() # prevent the tk window" ]
[ "datetime, logging, time from os.path import join, dirname from weblogger import addLogEntry import", "dirname from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name", "Watson to wait until they are ready to talk again. # Need some", "message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message", "#--- This is for the user to put Watson in a wait state", "message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def", "Need some key words or phrase to jump out of this state. #", "time from os.path import join, dirname from weblogger import addLogEntry import voiceProxySettings from", "------ def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return", "------------------------------------------------ # IMPORTS ---------------------------------------- # ------------------------------------------------ ##### # Python dist and 3rd party", "until they are ready to talk again. # Need some key words or", "users wants Watson to wait until they are ready to talk again. #", "message = preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message):", "to wait until they are ready to talk again. 
# Need some key", "voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because", "= preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return", "def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------ End From Client Wait", "ready to talk again. # Need some key words or phrase to jump", "import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" #", "logging, time from os.path import join, dirname from weblogger import addLogEntry import voiceProxySettings", "dist and 3rd party libraries ##### import os, requests, json, string, datetime, logging,", "# ------------------------------------------------ ##### # Python dist and 3rd party libraries ##### import os,", "os.path import join, dirname from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import", "libraries ##### import os, requests, json, string, datetime, logging, time from os.path import", "requests, json, string, datetime, logging, time from os.path import join, dirname from weblogger", "import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is", "##### # Python dist and 3rd party libraries ##### import os, requests, json,", "addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This", "of this state. 
# will need the RespTimeout Signal to be updated also.", "message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message", "from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because the", "setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because the users wants Watson", "import os, requests, json, string, datetime, logging, time from os.path import join, dirname", "because the users wants Watson to wait until they are ready to talk", "for the user to put Watson in a wait state ------ def waitState(message):", "need the RespTimeout Signal to be updated also. #--- This is for the", "voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because the users", "checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------ End From Client Wait state", "wait until they are ready to talk again. # Need some key words", "to jump out of this state. 
# will need the RespTimeout Signal to", "party libraries ##### import os, requests, json, string, datetime, logging, time from os.path", "def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------", "message def postCheckClientWaitState(message): return message #------ End From Client Wait state Methods ---------------------", "wait state ------ def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message =", "json, string, datetime, logging, time from os.path import join, dirname from weblogger import", "checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return", "be updated also. #--- This is for the user to put Watson in", "waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message def", "3rd party libraries ##### import os, requests, json, string, datetime, logging, time from", "out of this state. # will need the RespTimeout Signal to be updated", "IMPORTS ---------------------------------------- # ------------------------------------------------ ##### # Python dist and 3rd party libraries #####", "# Need some key words or phrase to jump out of this state.", "from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name =", "again. # Need some key words or phrase to jump out of this", "will need the RespTimeout Signal to be updated also. #--- This is for", "some key words or phrase to jump out of this state. # will", "talk again. 
# Need some key words or phrase to jump out of", "string, datetime, logging, time from os.path import join, dirname from weblogger import addLogEntry", "the user to put Watson in a wait state ------ def waitState(message): message", "also. #--- This is for the user to put Watson in a wait", "user to put Watson in a wait state ------ def waitState(message): message =", "import join, dirname from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn,", "and 3rd party libraries ##### import os, requests, json, string, datetime, logging, time", "Python dist and 3rd party libraries ##### import os, requests, json, string, datetime,", "join, dirname from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn", "---------------------------------------- # ------------------------------------------------ ##### # Python dist and 3rd party libraries ##### import", "is for the user to put Watson in a wait state ------ def", "# This is because the users wants Watson to wait until they are", "This is because the users wants Watson to wait until they are ready", "or phrase to jump out of this state. 
# will need the RespTimeout", "weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\"", "postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def postCheckClientWaitState(message):", "to put Watson in a wait state ------ def waitState(message): message = preCheckClientWaitState(message)", "put Watson in a wait state ------ def waitState(message): message = preCheckClientWaitState(message) message", "= postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def", "the users wants Watson to wait until they are ready to talk again.", "this state. # will need the RespTimeout Signal to be updated also. #---", "the RespTimeout Signal to be updated also. #--- This is for the user", "Watson in a wait state ------ def waitState(message): message = preCheckClientWaitState(message) message =", "phrase to jump out of this state. # will need the RespTimeout Signal", "##### import os, requests, json, string, datetime, logging, time from os.path import join,", "return message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------ End From", "# IMPORTS ---------------------------------------- # ------------------------------------------------ ##### # Python dist and 3rd party libraries", "# ------------------------------------------------ # IMPORTS ---------------------------------------- # ------------------------------------------------ ##### # Python dist and 3rd", "# Python dist and 3rd party libraries ##### import os, requests, json, string,", "\"checkClientWaitState\" # This is because the users wants Watson to wait until they", "RespTimeout Signal to be updated also. 
#--- This is for the user to", "preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------ End", "in a wait state ------ def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message)", "state ------ def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message)", "import setEarlyReturn, earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because the users wants", "return message def postCheckClientWaitState(message): return message #------ End From Client Wait state Methods", "message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return message #------ End From Client", "state. # will need the RespTimeout Signal to be updated also. #--- This", "This is for the user to put Watson in a wait state ------", "preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message", "jump out of this state. # will need the RespTimeout Signal to be", "a wait state ------ def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message", "logging_comp_name = \"checkClientWaitState\" # This is because the users wants Watson to wait", "are ready to talk again. # Need some key words or phrase to", "from os.path import join, dirname from weblogger import addLogEntry import voiceProxySettings from voiceProxyUtilities", "earlyReturn logging_comp_name = \"checkClientWaitState\" # This is because the users wants Watson to", "is because the users wants Watson to wait until they are ready to", "words or phrase to jump out of this state. # will need the", "Signal to be updated also. #--- This is for the user to put", "updated also. 
#--- This is for the user to put Watson in a", "------------------------------------------------ ##### # Python dist and 3rd party libraries ##### import os, requests,", "= \"checkClientWaitState\" # This is because the users wants Watson to wait until", "= checkClientWaitState(message) message = postCheckClientWaitState(message) return message def preCheckClientWaitState(message): return message def checkClientWaitState(message):", "return message def preCheckClientWaitState(message): return message def checkClientWaitState(message): return message def postCheckClientWaitState(message): return", "to talk again. # Need some key words or phrase to jump out", "# will need the RespTimeout Signal to be updated also. #--- This is", "wants Watson to wait until they are ready to talk again. # Need", "os, requests, json, string, datetime, logging, time from os.path import join, dirname from", "def waitState(message): message = preCheckClientWaitState(message) message = checkClientWaitState(message) message = postCheckClientWaitState(message) return message", "to be updated also. #--- This is for the user to put Watson", "key words or phrase to jump out of this state. # will need", "they are ready to talk again. # Need some key words or phrase" ]
[ "= state @swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset',", "from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def", "load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]):", "@click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState", "click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True)", "str, dump_cmd: str): # state: SwarmModeState = ctx.obj # for name_variant_elem in name_variant:", "state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) #", "for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" -", "is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState =", "SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment:", "preset: str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k, v in", "= k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build'])", "'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, 
env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select", "import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False)", "' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group()", "cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) )", "- {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context", "ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k,", "# def stack(): # pass # # # @stack.command('ls') # @click.pass_context # def", "variant = k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run,", "is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): # state:", "def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state", "'-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState", "state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k, v['variant'] cmd =", "# name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker',", "cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, 
variant), name]) # env = state.get_environment_for_stack(preset,", "def preset_push(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset)", "= ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name,", "run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run',", "ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k,", "a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: str = None,", "= ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant))", "variant), name]) # env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) #", "# @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj", "SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState =", "click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) #", ") # @swarm.group() # def stack(): # pass # # # @stack.command('ls') #", "# for name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant)", 
"SwarmModeState = ctx.obj # click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name)", "stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj # for", "env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup')", "dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) #", "preset_push(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data", "' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build')", "v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items():", "in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) #", "from typing import List import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group()", "@click.pass_context def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj", "variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack', 'deploy',", "variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run',", "# # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context #", "= ctx.obj 
state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for", "# click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant", "state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name,", "preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in", "SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx:", "= state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items():", "state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState = ctx.obj", "variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx:", "stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push'])", "env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True)", "in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant),", "' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, 
variant), env=state.get_environment_for_stack(preset, name, variant)", "v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset,", "= state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k, v['variant'] cmd", "cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset,", "name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj # for name_variant_elem in", "click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in", "@click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state =", "else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"", "state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run',", "name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True)", "in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = '", "v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run,", "ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: 
click.Context): state: SwarmModeState = ctx.obj @preset.command('ls')", "v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\",", "{}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context", "in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True)", "stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name,", "state @swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p',", "swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group()", "is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state:", "v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for", "nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd:", "variant = k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant),", "in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k,", "def stack_deploy(ctx: click.Context, name_variant: 
str, dump_cmd: str): # state: SwarmModeState = ctx.obj #", "= ctx.obj # for name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') #", "pass # # # @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): # state:", "@swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select", "@preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context,", "@swarm.group() # def stack(): # pass # # # @stack.command('ls') # @click.pass_context #", "= None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []),", "# cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env =", "run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def stack(): # pass", "a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str = None,", "variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def", "v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx:", "a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str = None,", "click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() 
@click.pass_context", "@click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState", "def stack(): # pass # # # @stack.command('ls') # @click.pass_context # def stack_ls(ctx:", "help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: str =", "@click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str,", "'-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState()", "for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t", "@click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str,", "for name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) #", "str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items():", "for k, v in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name, variant)", "[]), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant =", "def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for", "List import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True,", "name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') # 
state.ensure_stack_exists(name, variant) # state.ensure_preconditions(name,", "# @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState", "'-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str", "name, variant = k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant),", "click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in", "click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset',", "<gh_stars>0 from typing import List import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd", "'.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) )", "is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState =", "state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name,", "is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState =", "{}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def", "name, variant = k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', 
state.build_compose_sequence_for_stack(name,", "ctx.obj # click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for", "# state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name])", "# @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant:", "stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj # click.echo('Available stacks:') # for stack_name", "required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj", "# state: SwarmModeState = ctx.obj # click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()):", "# env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # #", "state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a", "name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def", "click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context", "name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True)", "help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str =", "# @click.pass_context # def 
stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj # click.echo('Available", "click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data =", "None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True)", "@click.pass_context def preset_push(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj", "v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)", "preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset: str = None, dry_run=False):", "state: SwarmModeState = ctx.obj # click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): #", "sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # #", "preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj if", "= ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) #", "# pass # # # @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): #", "# for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): #", "name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd", "required=True) @click.option('--dry-run', is_flag=True) 
@click.pass_context def preset_push(ctx: click.Context, preset: str = None, dry_run=False): state:", "'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def stack(): #", "a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj", "variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant),", "# @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState", "@stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context,", "= SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: click.Context): state: SwarmModeState", "preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k,", "@click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state:", "in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant']))", "SwarmModeState = ctx.obj # for name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':')", "# @swarm.group() # def stack(): # pass # # # @stack.command('ls') # @click.pass_context", "for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select", "@click.pass_context # def stack_ls(ctx: click.Context): # 
state: SwarmModeState = ctx.obj # click.echo('Available stacks:')", "variant = k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name])", "nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd:", "preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset]", "environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def", "# # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def", "= state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset,", "env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context", "dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a", "# # # @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): # state: SwarmModeState", "variant) # cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env", "preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False)", "@click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): #", "= k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, 
env=state.get_environment_for_stack(preset,", "{}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p',", "preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: str = None, dry_run=False):", "for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant',", "state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy')", "# @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx:", "List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx:", "@click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): #", "stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant = k, v['variant']", "@stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj #", "import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context,", "for k, v in stacks.items(): name, variant = k, v['variant'] cmd = '", "in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy',", "preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str = 
None, dry_run=False):", "@click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset)", "multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml')", "def preset_build(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset)", "# @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str):", "run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select", "ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset:", "dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks", "required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state:", "# for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') #", "variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) #", "'-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str", "# # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context #", "variant), 'push']) 
run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def stack():", "for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset", "k, v in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker-compose',", "name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # state.ensure_preconditions(name, variant, dump_cmd=dump_cmd)", "'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p',", "# click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True)", "v in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd =", "# def stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj # click.echo('Available stacks:') #", "variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset',", "cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name,", "# @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx:", "@click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState =", "stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj # for", "@preset.command('ls') 
@click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str):", "load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in stacks.items(): name, variant", "click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context", "# click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # #", "@click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset:", "# state: SwarmModeState = ctx.obj # for name_variant_elem in name_variant: # name, variant", "@click.pass_context def preset_build(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj", "cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\",", "click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj # for name_variant_elem", "click.Context): # state: SwarmModeState = ctx.obj # click.echo('Available stacks:') # for stack_name in", "variant) ) # @swarm.group() # def stack(): # pass # # # @stack.command('ls')", "'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd,", "stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()):", "def preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj 
state.ensure_preset(preset)", "state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env)", "env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context", "name, variant) ) # @swarm.group() # def stack(): # pass # # #", "ctx.obj if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for", "state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name, variant), env=state.get_environment_for_stack(preset, name, variant) ) @preset.command('push')", "click.Context, preset: str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k, v", "state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k,", "stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant))", "@preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context,", "state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks =", "'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p',", "state: SwarmModeState = 
ctx.obj # for name_variant_elem in name_variant: # name, variant =", "preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k,", "dump_cmd: str): # state: SwarmModeState = ctx.obj # for name_variant_elem in name_variant: #", "stack(): # pass # # # @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context):", "state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v", "in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # state.ensure_preconditions(name, variant,", "k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd,", "def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj #", "v in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker', 'stack',", "state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: click.Context): state:", "stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1)", "swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx:", "sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd',", "SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] 
load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks']", "in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) #", "@click.pass_context def preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a", "ctx.obj # for name_variant_elem in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name,", "help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState =", "run_cmd @click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment)", "= ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name,", "k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset))", "variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\",", "@click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx: click.Context, preset:", "'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd,", "@click.pass_context # def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState =", "@click.option('--dry-run', is_flag=True) @click.pass_context 
def preset_build(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState", "= k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd,", "state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys():", "load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj = state @swarm.group() @click.pass_context def preset(ctx: click.Context):", "state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant',", "def stack_ls(ctx: click.Context): # state: SwarmModeState = ctx.obj # click.echo('Available stacks:') # for", "'-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: str", "state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset in state.cfg['presets'].keys(): click.secho(\"Preset {}\".format(preset)) for k, v", "k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\" - {}:{}\".format(k, v['variant'])) @preset.command('deploy') @click.option('--preset', '-p', help=\"Select a", "run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True)", "= ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def preset_ls(ctx: click.Context,", "= state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') #", "required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_build(ctx: click.Context, preset: 
str = None, dry_run=False): state:", "name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant),", "'.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name, variant) #", "= name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name,", "name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd = '", "state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset,", "name, variant) ) @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context", "@preset.command('deploy') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context,", "# # @stack.command('ls') # @click.pass_context # def stack_ls(ctx: click.Context): # state: SwarmModeState =", "' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) # env = state.get_environment_for_stack(preset, name, variant)", "'.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() #", "help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset: str =", "k, v in stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name, 
variant) cmd", "required=False) @click.pass_context def preset_ls(ctx: click.Context, preset: str): state: SwarmModeState = ctx.obj if preset:", "k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name,", "dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True)", "# run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd',", "if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else: for preset", "@click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state = SwarmModeState() state.initFromFile('swarm-config.yml') ctx.obj =", "preset_build(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data", "name]) # env = state.get_environment_for_stack(preset, name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # #", "# def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj", "k, v in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker',", "name, variant) cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'build']) run_cmd(cmd, dry_run=dry_run, cwd=state.get_build_folder(preset, name,", "= ctx.obj # click.echo('Available stacks:') # for stack_name in sorted(state.layered_stacks.keys()): # click.echo(stack_name) #", "# def stack_deploy(ctx: click.Context, name_variant: str, dump_cmd: str): # state: SwarmModeState = ctx.obj", "name, variant = name_variant_elem.split(':') # 
state.ensure_stack_exists(name, variant) # cmd = ' '.join(['docker', 'stack',", "name, variant) # run_cmd(cmd, dry_run=dump_cmd, env=env) # # # @stack.command('setup') # @click.argument('name_variant', nargs=-1)", "import List import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment', '-e',", "preset_deploy(ctx: click.Context, preset: str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data", "dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def stack(): # pass #", "# @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_setup(ctx: click.Context, name_variant: str, dump_cmd: str):", "# @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context, name_variant:", "str = None, dry_run=False): state: SwarmModeState = ctx.obj state.ensure_preset(preset) preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files',", ") @preset.command('push') @click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_push(ctx:", "in name_variant: # name, variant = name_variant_elem.split(':') # state.ensure_stack_exists(name, variant) # cmd =", "= ctx.obj if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant'])) else:", "@click.group() @click.option('--environment', '-e', multiple=True, required=False) @click.pass_context def swarm(ctx: click.Context, environment: List[str]): load_env_files(environment) state", "v in stacks.items(): name, variant = k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name,", "'.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, 
variant), name]) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant)) @preset.command('build') @click.option('--preset',", "click.echo(stack_name) # for stack_variant in sorted(state.layered_stacks[stack_name].keys()): # click.echo(\"\\t {}\".format(stack_variant)) # # # @stack.command('deploy')", "env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def stack(): # pass # #", "SwarmModeState = ctx.obj if preset: state.ensure_preset(preset) for k, v in state.cfg['presets'][preset]['stacks'].items(): click.secho(\"{}:{}\".format(k, v['variant']))", "stacks.items(): name, variant = k, v['variant'] state.prepare_build_folder(preset, name, variant) cmd = ' '.join(['docker-compose',", "name, variant = k, v['variant'] cmd = ' '.join(['docker-compose', state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd,", "# # @stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def", "@stack.command('deploy') # @click.argument('name_variant', nargs=-1) # @click.option('--dump-cmd', is_flag=True) # @click.pass_context # def stack_deploy(ctx: click.Context,", "@click.option('--preset', '-p', help=\"Select a preset\", required=True) @click.option('--dry-run', is_flag=True) @click.pass_context def preset_deploy(ctx: click.Context, preset:", "preset_data = state.cfg['presets'][preset] load_env_files(preset_data.get('env_files', []), ignore_missing=True) stacks = state.cfg['presets'][preset]['stacks'] for k, v in", "def preset(ctx: click.Context): state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\",", "state: SwarmModeState = ctx.obj @preset.command('ls') @click.option('--preset', '-p', help=\"Select a preset\", required=False) @click.pass_context def", "str): # state: SwarmModeState = ctx.obj # for name_variant_elem in name_variant: # name,", "typing import List 
import click from swarm_cli.lib import SwarmModeState, load_env_files, run_cmd @click.group() @click.option('--environment',", "k, v['variant'] cmd = ' '.join(['docker', 'stack', 'deploy', state.build_deploy_sequence_for_stack(name, variant), name]) run_cmd(cmd, dry_run=dry_run,", "state.build_compose_sequence_for_stack(name, variant), 'push']) run_cmd(cmd, dry_run=dry_run, env=state.get_environment_for_stack(preset, name, variant) ) # @swarm.group() # def" ]
[ "!= '_' and attrname not in kwargs and not hasattr(Schema, attrname) ): attr", "= _resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname for attrname in", "= ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes)", "{1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true", "name, default, index, val ) ) def _setvalue(self, schema, value): if schema.name ==", "not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) )", "None: self.ref = None if value is None else data2schema(value) if value is", "and kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs, default = getargspec(function) except", "and default is not None and val != default ): raise TypeError( '{0}.", "default is None else len(default)) params = OrderedDict() for index, arg in enumerate(args):", "self.rtype ) ) for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default =", "and pname in params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR", "if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas,", "re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params", "scope=scope) except ImportError: islist = True try: if rtype_[-1] == 's': lkrtype =", "value is None else data2schema(value) if value is not None: self.mandatory = False", "while validating {0} with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}.", "1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return", "func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order to", "_resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__", "data.__name__ != self.name: raise TypeError( '{0}. Wrong function name {1}. {2} expected.'.format( errormsg,", "elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg", "schema.\"\"\" #: if true (default), update self ref when default is given. autotype", "obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs", "enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name !=", "None if value is None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs", "{3} expected.'.format( errormsg, name, index, param.name ) ) val = param.default if isinstance(val,", "rtype {1}. 
{2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()):", "to permit persons to whom the Software is # furnished to do so,", "ptypes = ptype.split(',') schemas = [] for ptype in ptypes: ptype = ptype.strip()", "in pkwarg: val = pkwarg[key] if val is not None: setattr(selfparam, key, val)", "None and val != default ): raise TypeError( '{0}. Wrong val {1}/{2} at", "and this permission notice shall be included in # all copies or substantial", "except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema)", "= ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes)", "or kwargs if (not var) and len(params) != len(self.params): raise TypeError( '{0}. Wrong", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self,", "= (), (), (), () indexlen = len(args) - (0 if default is", "*args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try:", ") try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls()", "= schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype", "for rtype_ in rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype =", "in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name", "ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types", "None if rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_ in rtypes:", "= False class FunctionSchema(ElementarySchema): \"\"\"Function 
schema. Dedicated to describe functions, methods and lambda", "params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = ''", "False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist = True try: if", ") ) val = param.default if isinstance(val, DynamicValue): val = val() if (", "into a schema.\"\"\" if default is None: return lambda default: funcschema(default=default, *args, **kwargs)", "= value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value)", "if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of", "_PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params =", "not None and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2}", "default is given. autotype = True mandatory = True #: if true (default),", "scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype =", "ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "selfparams: selfparam = selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for", "str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params", "true (default), parameter value is mandatory. 
def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema,", "[ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema()", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "= [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building", "**kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding", "self.params = params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl", "ref when default is given. autotype = True mandatory = True #: if", "BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect import getargspec, getsourcelines,", "value value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None if", "vargs and kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs, default = getargspec(function)", "generating a schema.\"\"\" type = TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data,", "(match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype in ptypes:", "= None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result", "from re import compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import", "'{0}. Wrong param {1} at {2}. 
{3} expected.'.format( errormsg, name, index, param.name )", "default value value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None", "\"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)'", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource", "result = func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs) return result", "indexlen: # has default value value = default[index - indexlen] pkwargs['default'] = value", "in self.params: selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()):", "\"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try:", "try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if", "self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs,", "MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return result def getresource(self,", "a schema.\"\"\" if default is None: return lambda default: funcschema(default=default, *args, **kwargs) return", "build a schema from the decorate class. :param type _cls: class to decorate.", "isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ]", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "= len(args) - (0 if default is None else len(default)) params = OrderedDict()", "elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except", "software and associated documentation files (the \"Software\"), to deal # in the Software", "self.vargs = vargs or kwargs params = [] selfparams = {} for selfparam", "param.default if isinstance(val, DynamicValue): val = val() if ( val is not None", "from .factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from", "issubclass(mro, Schema): result = mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator", "= self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if (not var) and", "and to permit persons to whom the Software is # furnished to do", "self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}.", "getsourcelines, isclass, isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema',", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1:", "False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data", "try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '):", "from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls,", "schema.\"\"\" type = TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs)", "len(rtypes) > 1: rtype = 
OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list", "0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None #", "if default is None: return lambda default: funcschema(default=default, *args, **kwargs) return FunctionSchema(default=default, *args,", "function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema =", "dir(_resource): if ( attrname and attrname[0] != '_' and attrname not in kwargs", "import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from", "value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs", "pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1:", "if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs)", "of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error", ") from six import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType,", "from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class", "scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope ) else:", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if self.autotype and self.ref is", "ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls =", "resname for attrname in dir(_resource): if ( attrname and attrname[0] != '_' and", "if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref']", "TypeError: args, vargs, kwargs, default = (), (), (), () indexlen = len(args)", "None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl", "== 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'):", "else: for key in pkwarg: val = pkwarg[key] if val is not None:", "ImportError: islist = True try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1],", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "to deal # in the Software without restriction, including without limitation the rights", "to any person obtaining a copy # of this software and associated documentation", "= False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist = True try:", "import lookup from ..base import Schema, DynamicValue from .factory import SchemaBuilder, build from", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "lambda objects. \"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE", "try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function):", "= 'rtype \"{0}\" ({1}) from {2} not found.' 
raise ImportError( msg.format(rtype_, rrtype, function)", "lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True try: if rtype_[-1] ==", "not in kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if", "lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error on ptype \"{0}\" ({1})", "nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs", "len(self.params) ) ) if self.rtype is not None and type(self.rtype) != type(rtype): raise", "return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema):", "this software and associated documentation files (the \"Software\"), to deal # in the", ":rtype: tuple \"\"\" try: args, vargs, kwargs, default = getargspec(function) except TypeError: args,", "args, vargs, kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs, default =", "value is None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype =", "lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else:", "b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from", "self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if (not var) and len(params)", "granted, free of charge, to any person obtaining a copy # of this", "expected.'.format( rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()): name = pkwargs['name']", "islist = False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist = True", "as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base import", "None and default is not None and val != default ): raise TypeError(", "while generating a schema.\"\"\" type = TypeSchema() def _validate(self, data, *args, 
**kwargs): super(ParamTypeSchema,", "lkptype = lookup(ptype, scope=scope) except ImportError: islist = True try: if ptype[-1] ==", "# Permission is hereby granted, free of charge, to any person obtaining a", "data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None # parse docstring", "kwargs if (not var) and len(params) != len(self.params): raise TypeError( '{0}. Wrong param", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "{0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else: result =", "b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from .factory import SchemaBuilder, build", "except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from", "val = pkwarg[key] if val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params", "TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format( errormsg, len(params), len(self.params) ) )", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "and len(params) != len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format(", "if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype", "__data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype", "!= default ): raise TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected {4}.'.format(", "**kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type", "expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is not None and type(self.rtype)", "is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result", "copies of the Software, and to permit persons to whom the Software is", "obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj, *args, **kwargs)", "ImportError: msg = 'Error on ptype \"{0}\" ({1}) from {2} not found.' raise", "TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema)", "_RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema())", "a function into a schema.\"\"\" if default is None: return lambda default: funcschema(default=default,", "{0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if", "result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except TypeError: result = func(*args,", "!= type(rtype): raise TypeError( '{0}. Wrong rtype {1}. 
{2} expected.'.format( rtype, self.rtype )", "MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect import", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "The MIT License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> # #", "schemacls): result = None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro,", "a parameter type which met a problem while generating a schema.\"\"\" type =", "msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype)", "vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if (not", "datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema =", "if value is not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. Dedicated", "expected.'.format( errormsg, name, index, param.name ) ) val = param.default if isinstance(val, DynamicValue):", "= pschema return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args,", "self) if data.__name__ != self.name: raise TypeError( '{0}. Wrong function name {1}. {2}", "isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent", "} # param kwargs if index >= indexlen: # has default value value", "DEALINGS IN THE # SOFTWARE. # -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re", "if issubclass(mro, Schema): result = mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class", "function name {1}. 
{2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs,", "= params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl =", "= '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from input function and", "= None if value is None else data2schema(value) if value is not None:", "{} for selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0 for index,", "any person obtaining a copy # of this software and associated documentation files", "if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return", "from six import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools", "= Schema() impl = '' impltype = '' safe = False varargs =", "in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True } # param kwargs", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "rtype is None: rrtype = match[4].strip() or None if rrtype: rtypes = rrtype.split(',')", "a copy # of this software and associated documentation files (the \"Software\"), to", "True #: if true (default), parameter value is mandatory. 
def _setvalue(self, schema, value,", "not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype = 'python' try:", "[] for rtype_ in rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype", "**kwargs) if data != self.default or data is not self.default: errormsg = 'Error", "isclass, isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema'", "ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type which met a problem while", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "'' safe = False varargs = False def _validate(self, data, owner, *args, **kwargs):", "match in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or None if", "is None: self.ref = None if value is None else data2schema(value) if value", "var = self.varargs or vargs or kwargs if (not var) and len(params) !=", "charge, to any person obtaining a copy # of this software and associated", "build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema,", "val = val() if ( val is not None and default is not", "describe functions, methods and lambda objects. 
\"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE", "= r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType,", "*args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or data is", "is None: resname = _resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname", "result = None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema):", "islist = True try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "*args, **kwargs) if schema.name == 'default': if self.autotype and self.ref is None: self.ref", "raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema):", "and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2} expected.'.format( rtype,", "in kwargs: kwargs['name'] = resname for attrname in dir(_resource): if ( attrname and", "value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is", "except ImportError: msg = 'rtype \"{0}\" ({1}) from {2} not found.' raise ImportError(", "\"\"\"Function schema. Dedicated to describe functions, methods and lambda objects. \"\"\" _PDESC =", "value, *args, **kwargs) if schema.name == 'default': if self.autotype and self.ref is None:", "not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema):", "kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs, default = (), (),", "import updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType,", "FunctionSchema(ElementarySchema): \"\"\"Function schema. 
Dedicated to describe functions, methods and lambda objects. \"\"\" _PDESC", "'default': if self.autotype and self.ref is None: self.ref = None if value is", "= value pkwargs['ref'] = None if value is None else data2schema(value) pkwargs['mandatory'] =", "parameter schema.\"\"\" #: if true (default), update self ref when default is given.", "in dir(_resource): if ( attrname and attrname[0] != '_' and attrname not in", "= { 'name': arg, 'mandatory': True } # param kwargs if index >=", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "self.default or data is not self.default: errormsg = 'Error while validating {0} with", "params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError:", "kwargs if index >= indexlen: # has default value value = default[index -", "ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg =", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "= ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj,", "True mandatory = True #: if true (default), parameter value is mandatory. def", "_cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge", "_getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args,", "= lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise", "selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg in", "kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr,", "if data.__name__ != self.name: raise TypeError( '{0}. Wrong function name {1}. 
{2} expected.'.format(", "= ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema", "raise except ImportError: msg = 'Error on ptype \"{0}\" ({1}) from {2} not", "else: raise except ImportError: msg = 'rtype \"{0}\" ({1}) from {2} not found.'", "self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from input function", "type :return: schema class. \"\"\" if _cls is None: return lambda _cls: buildschema(_cls=_cls,", "obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs):", "selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg:", "# # Permission is hereby granted, free of charge, to any person obtaining", "kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs):", "try: args, vargs, kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs, default", "issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is", "buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a schema from the decorate class.", "# has default value value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref']", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "methods and lambda objects. 
\"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "param kwargs if index >= indexlen: # has default value value = default[index", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "index, val ) ) def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self,", "result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema", "of the Software, and to permit persons to whom the Software is #", "function.__doc__ is not None and not isbuiltin(function): scope = get_function_globals(function) for match in", "if schema.name == 'default': if self.autotype and self.ref is None: self.ref = None", "data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or", "{2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()): name =", "**kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or data is not", "**kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. 
{1} expected.'.format(data,", "index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index]", "if default is None else len(default)) params = OrderedDict() for index, arg in", "classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError(", "is None else data2schema(value) if value is not None: self.mandatory = False class", "def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a schema from the decorate", "value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs):", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "None: resname = _resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname for", "TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function", "'Error while validating {0} with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError(", "= vargs or kwargs params = [] selfparams = {} for selfparam in", "'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\"", "kwargs['name'] = resname for attrname in dir(_resource): if ( attrname and attrname[0] !=", "__all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of", "rtype = schemas[0] continue pname = (match[1] or match[2]).strip() if pname and pname", "ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs =", "func(*args, **kwargs) return result result.source = func return result def funcschema(default=None, *args, **kwargs):", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
DEALINGS", "(c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge,", "if val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype", "= datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__ if 'name' not", "**kwargs): \"\"\"Class decorator used to build a schema from the decorate class. :param", "selfparam = None # old self param if name in selfparams: selfparam =", "msg = 'Error on ptype \"{0}\" ({1}) from {2} not found.' raise ImportError(", "= rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist", ") def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self,", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "= 'Error while validating {0} with {1}'.format(data, self) if data.__name__ != self.name: raise", "to describe functions, methods and lambda objects. \"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):'", "from {2} not found.' raise ImportError( msg.format(pname, ptype, function) ) try: schemacls =", "= TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not", "_RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType,", "isinstance(val, DynamicValue): val = val() if ( val is not None and default", "super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type of", "mandatory. 
def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if", ") try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls()", "scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError:", "attrname in dir(_resource): if ( attrname and attrname[0] != '_' and attrname not", "!= self.default or data is not self.default: errormsg = 'Error while validating {0}", "FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "= lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:],", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "return result result.source = func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to", "{1}/{2} at {3}. Expected {4}.'.format( errormsg, name, default, index, val ) ) def", "{2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is not None and", "the decorate class. :param type _cls: class to decorate. :param kwargs: schema attributes", "is hereby granted, free of charge, to any person obtaining a copy #", "in params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = []", "data != self.default or data is not self.default: errormsg = 'Error while validating", "None: rrtype = match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas =", "_validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "is mandatory. 
def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs)", "and attrname[0] != '_' and attrname not in kwargs and not hasattr(Schema, attrname)", "{2}. {3} expected.'.format( errormsg, name, index, param.name ) ) val = param.default if", "TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format( errormsg, name, index, param.name", "so, subject to the following conditions: # # The above copyright notice and", "{1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}. Wrong function name {1}.", "lookup(ptype, scope=scope) except ImportError: islist = True try: if ptype[-1] == 's': lkptype", "_validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise", "len(params) != len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format( errormsg,", "copy # of this software and associated documentation files (the \"Software\"), to deal", "\"\"\"In charge of building python classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs):", "vargs, kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs, default = (),", "self ref when default is given. autotype = True mandatory = True #:", "default = pkwargs.get('default') param = self.params[index] if param.name != name: raise TypeError( '{0}.", "impltype = '' safe = False varargs = False def _validate(self, data, owner,", "OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname = (match[1] or match[2]).strip() if", "License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is", "None and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2} expected.'.format(", "val != default ): raise TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected", "try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if", ":param kwargs: schema attributes to set. :rtype: type :return: schema class. \"\"\" if", "in rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope)", "result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type which met a", "enumerate(args): pkwargs = { 'name': arg, 'mandatory': True } # param kwargs if", "param if name in selfparams: selfparam = selfparams[name] if selfparam is None: selfparam", "impl = '' impltype = '' safe = False varargs = False def", "is not None and not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__):", "schemacls.mro(): if issubclass(mro, Schema): result = mro break return result def buildschema(_cls=None, **kwargs):", "length: {1}. {2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is not", "pkwargs = { 'name': arg, 'mandatory': True } # param kwargs if index", "validating {0} with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}. Wrong", "is not None and default is not None and val != default ):", "ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist =", "sublicense, and/or sell # copies of the Software, and to permit persons to", "if function.__doc__ is not None and not isbuiltin(function): scope = get_function_globals(function) for match", "vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params = [] selfparams", "selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val =", "val() if ( val is not None and default is not None and", "TypeError( '{0}. Wrong function name {1}. 
{2} expected.'.format( errormsg, data.__name__, self.name ) )", "TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from input", "if issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result", "TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data,", "Expected {4}.'.format( errormsg, name, default, index, val ) ) def _setvalue(self, schema, value):", "@wraps(func) def result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except TypeError: result", "in selfparams: selfparam = selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else:", "(), (), (), () indexlen = len(args) - (0 if default is None", "# copies of the Software, and to permit persons to whom the Software", "mro in schemacls.mro(): if issubclass(mro, Schema): result = mro break return result def", "False params[arg] = pkwargs rtype = None # parse docstring if function.__doc__ is", "indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value is None else data2schema(value)", "'): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg =", "datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType )", "islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True)", "({1}) from {2} not found.' 
raise ImportError( msg.format(pname, ptype, function) ) try: schemacls", "this permission notice shall be included in # all copies or substantial portions", "- indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value is None else", "errormsg, name, index, param.name ) ) val = param.default if isinstance(val, DynamicValue): val", "= False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if", "isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype", "lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope", "type which met a problem while generating a schema.\"\"\" type = TypeSchema() def", "else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__ if", "'olddefault'): if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args,", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "result result.source = func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use", "== 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:],", "type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result = None if hasattr(schemacls,", "(default), update self ref when default is given. autotype = True mandatory =", "**kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self,", "({1}) from {2} not found.' 
raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls", "= OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype,", "result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a schema from the", "ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj, *args,", "= [] selfparams = {} for selfparam in self.params: selfparams[selfparam.name] = selfparam index", "if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs)", "len(default)) params = OrderedDict() for index, arg in enumerate(args): pkwargs = { 'name':", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname = (match[1] or", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "True } # param kwargs if index >= indexlen: # has default value", ":return: schema class. 
\"\"\" if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs)", "= getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname,", "embedding a parameter type which met a problem while generating a schema.\"\"\" type", "= '' impltype = '' safe = False varargs = False def _validate(self,", "if isinstance(val, DynamicValue): val = val() if ( val is not None and", "rtype = None # parse docstring if function.__doc__ is not None and not", "whom the Software is # furnished to do so, subject to the following", "schema.name == 'default': if self.autotype and self.ref is None: self.ref = None if", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "-------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import compile as re_compile from b3j0f.utils.version", "schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0]", "utilities.\"\"\" from re import compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path", "for selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg", "functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder):", "kwargs: schema attributes to set. :rtype: type :return: schema class. \"\"\" if _cls", "is # furnished to do so, subject to the following conditions: # #", "charge of building python classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs): if", "schema class. \"\"\" if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result", "found.' 
raise ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError:", "-*- coding: utf-8 -*- # -------------------------------------------------------------------- # The MIT License (MIT) # #", "attrname and attrname[0] != '_' and attrname not in kwargs and not hasattr(Schema,", "rtypes = rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_ = rtype_.strip()", ") if issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if", "owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or data", "= ptype.split(',') schemas = [] for ptype in ptypes: ptype = ptype.strip() islist", "function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema =", "ptypes: ptype = ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope) except", "ptype in ptypes: ptype = ptype.strip() islist = False try: lkptype = lookup(ptype,", "import Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema,", "value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or", "rrtype = match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas = []", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "params = OrderedDict() for index, arg in enumerate(args): pkwargs = { 'name': arg,", "docstring if function.__doc__ is not None and not isbuiltin(function): scope = get_function_globals(function) for", "update self ref when default is given. 
autotype = True mandatory = True", "import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals", "expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var", "args, vargs, kwargs, default = (), (), (), () indexlen = len(args) -", "import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In", "*args, **kwargs) if data != self.default or data is not self.default: errormsg =", "= getargspec(function) except TypeError: args, vargs, kwargs, default = (), (), (), ()", "def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order to transform a function", "resname = _resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname for attrname", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "Wrong param length: {1}. {2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype", "None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key] if", "_REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType", "mandatory = True #: if true (default), parameter value is mandatory. def _setvalue(self,", "getargspec(function) except TypeError: args, vargs, kwargs, default = (), (), (), () indexlen", "furnished to do so, subject to the following conditions: # # The above", "rtype = Schema() impl = '' impltype = '' safe = False varargs", "decorate class. :param type _cls: class to decorate. :param kwargs: schema attributes to", "{0} with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}. 
Wrong function", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "be included in # all copies or substantial portions of the Software. #", ":return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs,", "pname = (match[1] or match[2]).strip() if pname and pname in params: ptype =", "for mro in schemacls.mro(): if issubclass(mro, Schema): result = mro break return result", "'' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from input function and rtype.", "len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] =", "pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name != name: raise TypeError(", "for index, arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True }", "self.name: raise TypeError( '{0}. Wrong function name {1}. {2} expected.'.format( errormsg, data.__name__, self.name", "six import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import", "schemas = [] for ptype in ptypes: ptype = ptype.strip() islist = False", "not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname]", "data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError(", "index = 0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam =", "result.source = func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in", "len(params), len(self.params) ) ) if self.rtype is not None and type(self.rtype) != type(rtype):", "if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val", "from the decorate class. :param type _cls: class to decorate. 
:param kwargs: schema", "= OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname = (match[1] or match[2]).strip()", "== 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype", "= func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order", "= resname for attrname in dir(_resource): if ( attrname and attrname[0] != '_'", "'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__", "None # parse docstring if function.__doc__ is not None and not isbuiltin(function): scope", "types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import", "param.name != name: raise TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format(", "Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of", "utf-8 -*- # -------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c)", "'Error on ptype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(pname, ptype,", "= pkwarg['name'] selfparam = None # old self param if name in selfparams:", "schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema =", "Wrong rtype {1}. {2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs in", "self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or", "or match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype in ptypes: ptype", "if param.name != name: raise TypeError( '{0}. Wrong param {1} at {2}. {3}", "\\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource,", "to decorate. :param kwargs: schema attributes to set. 
:rtype: type :return: schema class.", "\"\"\"Get function params from input function and rtype. :return: OrderedDict, rtype, vargs and", "attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs)", "if index >= indexlen: # has default value value = default[index - indexlen]", "rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope) except", "= ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key] if val is", "**kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type which", "if true (default), update self ref when default is given. autotype = True", "copyright notice and this permission notice shall be included in # all copies", "# param kwargs if index >= indexlen: # has default value value =", "= datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema", "deal # in the Software without restriction, including without limitation the rights #", "Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema,", "] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__ = 'python' def", "**kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func)", "decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema class. \"\"\"", "attributes to set. :rtype: type :return: schema class. \"\"\" if _cls is None:", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python", "RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from", "= [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype =", "class to decorate. :param kwargs: schema attributes to set. :rtype: type :return: schema", "<<EMAIL>> # # Permission is hereby granted, free of charge, to any person", "used to build a schema from the decorate class. :param type _cls: class", "!= len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format( errormsg, len(params),", "if rtype is None: rrtype = match[4].strip() or None if rrtype: rtypes =", "(match[1] or match[2]).strip() if pname and pname in params: ptype = (match[0] or", "pkwargs['default'] = value pkwargs['ref'] = None if value is None else data2schema(value) pkwargs['mandatory']", "except ImportError: islist = True try: if rtype_[-1] == 's': lkrtype = lookup(", "param = self.params[index] if param.name != name: raise TypeError( '{0}. Wrong param {1}", "{2} not found.' raise ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype)", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "<NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any", "to whom the Software is # furnished to do so, subject to the", "getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema',", "with {1}'.format(data, self) if data.__name__ != self.name: raise TypeError( '{0}. 
Wrong function name", "params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs", "= selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key in", "expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default),", "hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] =", "schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist:", "default = (), (), (), () indexlen = len(args) - (0 if default", "#: if true (default), update self ref when default is given. autotype =", "schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist:", "schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self,", "attrname[0] != '_' and attrname not in kwargs and not hasattr(Schema, attrname) ):", "if self.autotype and self.ref is None: self.ref = None if value is None", "param length: {1}. 
{2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is", "'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls,", "**kwargs) if schema.name == 'default': if self.autotype and self.ref is None: self.ref =", "type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else: result", "2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "autotype = True mandatory = True #: if true (default), parameter value is", "pkwarg['name'] selfparam = None # old self param if name in selfparams: selfparam", "[ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python", "): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result", "schemas[0] continue pname = (match[1] or match[2]).strip() if pname and pname in params:", "= 'python' def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type", "class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default), update self ref when", "= str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get function", "*args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong", "# -------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c) 2016 <NAME>", "self.type): raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class", "not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. 
Dedicated to describe functions,", "default is None: return lambda default: funcschema(default=default, *args, **kwargs) return FunctionSchema(default=default, *args, **kwargs)", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "wraps __all__ = [ 'PythonSchemaBuilder', 'FunctionSchema', 'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge", "sell # copies of the Software, and to permit persons to whom the", "# all copies or substantial portions of the Software. # # THE SOFTWARE", "build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type", "and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType):", "not found.' raise ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except", "return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs", "OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs, default", "PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__ = 'python' def build(self, _resource,", "MemberDescriptorType ) from six import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin", "a schema from the decorate class. :param type _cls: class to decorate. :param", "schema attributes to set. :rtype: type :return: schema class. \"\"\" if _cls is", "all copies or substantial portions of the Software. 
# # THE SOFTWARE IS", "of embedding a parameter type which met a problem while generating a schema.\"\"\"", "MIT License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission", "Schema() impl = '' impltype = '' safe = False varargs = False", "data2schema(value) if value is not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema.", "ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls =", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "def _getparams_rtype(cls, function): \"\"\"Get function params from input function and rtype. :return: OrderedDict,", "{2} not found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype)", "function into a schema.\"\"\" if default is None: return lambda default: funcschema(default=default, *args,", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError: schemacls", "= re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ]", "files (the \"Software\"), to deal # in the Software without restriction, including without", "is None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None", "OrderedDict() for index, arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True", "pkwargs['ref'] = None if value is None else data2schema(value) pkwargs['mandatory'] = False params[arg]", "= 0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None", "rtype_ = rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope) except ImportError:", "following conditions: # # The above copyright notice and this permission notice shall", "arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True } # param", "kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs, default = getargspec(function) except TypeError:", "get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or", "= default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value is", "break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a schema", "The above copyright notice and this permission notice shall be included in #", "#: if true (default), parameter value is mandatory. def _setvalue(self, schema, value, *args,", "**kwargs): \"\"\"Decorator to use in order to transform a function into a schema.\"\"\"", "in ptypes: ptype = ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope)", "= pkwargs.get('default') param = self.params[index] if param.name != name: raise TypeError( '{0}. 
Wrong", "a problem while generating a schema.\"\"\" type = TypeSchema() def _validate(self, data, *args,", "result = mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to", "rtype. :return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\" try: args, vargs,", "'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result = mro break return", "else len(default)) params = OrderedDict() for index, arg in enumerate(args): pkwargs = {", "rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '):", "kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params = [] selfparams =", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "*args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if self.autotype", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "or vargs or kwargs if (not var) and len(params) != len(self.params): raise TypeError(", "..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema", "DynamicValue): val = val() if ( val is not None and default is", "= self._getparams_rtype(value) self.vargs = vargs or kwargs params = [] selfparams = {}", "match[2]).strip() if pname and pname in params: ptype = (match[0] or match[3]).strip() ptypes", "index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old self", "default = getargspec(function) except TypeError: args, vargs, kwargs, default = (), (), (),", "ptype \"{0}\" ({1}) from {2} not found.' 
raise ImportError( msg.format(pname, ptype, function) )", "notice shall be included in # all copies or substantial portions of the", ">= indexlen: # has default value value = default[index - indexlen] pkwargs['default'] =", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "{2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data)", "(0 if default is None else len(default)) params = OrderedDict() for index, arg", "else data2schema(value) if value is not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function", "__call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func =", "getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,),", "buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of", "def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def", "**kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if", "ptype.split(',') schemas = [] for ptype in ptypes: ptype = ptype.strip() islist =", "def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name", "default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value is None", "value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs,", "return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type which met", "None if value is None else data2schema(value) if value is not None: self.mandatory", "or None if rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_ in", "__name__ = 'python' 
def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong", "if name in selfparams: selfparam = selfparams[name] if selfparam is None: selfparam =", "nullable=True) else: rtype = schemas[0] continue pname = (match[1] or match[2]).strip() if pname", "*args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except", "'Wrong type of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter", "self.params[index] if param.name != name: raise TypeError( '{0}. Wrong param {1} at {2}.", "{3}. Expected {4}.'.format( errormsg, name, default, index, val ) ) def _setvalue(self, schema,", "default is not None and val != default ): raise TypeError( '{0}. Wrong", "= func(*args, **kwargs) return result result.source = func return result def funcschema(default=None, *args,", "type = TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if", "in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old self param if", "and not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "re import compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup", "result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__ if 'name'", "rtype_ in rtypes: rtype_ = rtype_.strip() islist = False try: lkrtype = lookup(rtype_,", "import OrderedDict from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from .factory", "vargs, kwargs, default = (), (), (), () indexlen = len(args) - (0", "except ImportError: islist = True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1],", "parse docstring if function.__doc__ is not None and 
not isbuiltin(function): scope = get_function_globals(function)", "to the following conditions: # # The above copyright notice and this permission", "getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if", "return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order to transform", "for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param =", "'python' def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0},", "coding: utf-8 -*- # -------------------------------------------------------------------- # The MIT License (MIT) # # Copyright", "= rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_ = rtype_.strip() islist", "try: lkptype = lookup(ptype, scope=scope) except ImportError: islist = True try: if ptype[-1]", "def build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\'", "= 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def", "'name': arg, 'mandatory': True } # param kwargs if index >= indexlen: #", "or data is not self.default: errormsg = 'Error while validating {0} with {1}'.format(data,", "params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype = '' safe", "transform a function into a schema.\"\"\" if default is None: return lambda default:", "BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect import getargspec, getsourcelines, isclass,", "Software is # furnished to do so, subject to the following conditions: #", "if rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_", "schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default':", "default ): raise TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected {4}.'.format( errormsg,", "enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old self param if name", "from ..base import Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary import", ") else: raise except ImportError: msg = 'rtype \"{0}\" ({1}) from {2} not", "ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import", "**kwargs) @wraps(func) def result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except TypeError:", "= 'Error on ptype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(pname,", "raise TypeError( '{0}. Wrong function name {1}. {2} expected.'.format( errormsg, data.__name__, self.name )", "1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname = (match[1]", "False class FunctionSchema(ElementarySchema): \"\"\"Function schema. Dedicated to describe functions, methods and lambda objects.", "func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs) return result result.source =", "except TypeError: args, vargs, kwargs, default = (), (), (), () indexlen =", "name, index, param.name ) ) val = param.default if isinstance(val, DynamicValue): val =", "else: rtype = schemas[0] continue pname = (match[1] or match[2]).strip() if pname and", "and val != default ): raise TypeError( '{0}. 
Wrong val {1}/{2} at {3}.", ") for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param", "from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue", "\"\"\"In charge of embedding a parameter type which met a problem while generating", "mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a", "val) params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "value pkwargs['ref'] = None if value is None else data2schema(value) pkwargs['mandatory'] = False", "pname in params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas =", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "class. \"\"\" if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result =", "vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args,", "Schema): result = mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "= [] for ptype in ptypes: ptype = ptype.strip() islist = False try:", "the following conditions: # # The above copyright notice and this permission notice", "'buildschema', 'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__ =", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_", ") ) def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def", "rights # to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell #", "if value is None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype", "lookup(rtype_, scope=scope) except ImportError: islist = True try: if rtype_[-1] == 's': lkrtype", "False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True try: if", "# Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free", "BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype =", "class. :param type _cls: class to decorate. :param kwargs: schema attributes to set.", "*args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault = value", "Dedicated to describe functions, methods and lambda objects. \"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+)", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "'): lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error on", "{1}. {2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs =", "to do so, subject to the following conditions: # # The above copyright", "_force=False) if result is None: resname = _resource.__name__ if 'name' not in kwargs:", "name = pkwarg['name'] selfparam = None # old self param if name in", "FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl", "name {1}. 
{2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs", "\"\"\"Python language schemas utilities.\"\"\" from re import compile as re_compile from b3j0f.utils.version import", "r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType, MethodType, LambdaType, BuiltinFunctionType,", "errormsg = 'Error while validating {0} with {1}'.format(data, self) if data.__name__ != self.name:", ") elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise", "= ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope) except ImportError: islist", "has default value value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] =", "raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result =", "attrname) ): attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr", "*args, **kwargs) except TypeError: result = func(*args, **kwargs) return result result.source = func", "result = func(*args, **kwargs) return result result.source = func return result def funcschema(default=None,", "met a problem while generating a schema.\"\"\" type = TypeSchema() def _validate(self, data,", "Wrong function name {1}. {2} expected.'.format( errormsg, data.__name__, self.name ) ) params, rtype,", "TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema)", "= lookup(ptype, scope=scope) except ImportError: islist = True try: if ptype[-1] == 's':", "ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema", "IN THE # SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "pkwarg: val = pkwarg[key] if val is not None: setattr(selfparam, key, val) params.append(selfparam)", "'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else:", "self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default), update", "def result(*args, **kwargs): try: result = func(obj, *args, **kwargs) except TypeError: result =", "TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType,", "result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname", "type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2} expected.'.format( rtype, self.rtype", "val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype =", "TypeError: result = func(*args, **kwargs) return result result.source = func return result def", "self.rtype is not None and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype", "# old self param if name in selfparams: selfparam = selfparams[name] if selfparam", "permission notice shall be included in # all copies or substantial portions of", "*args, **kwargs) if not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. 
{1}", "-------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>>", "pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if", "\"\"\" if _cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls,", "**kwargs) except TypeError: result = func(*args, **kwargs) return result result.source = func return", "LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from inspect import getargspec,", "_setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name ==", "len(args) - (0 if default is None else len(default)) params = OrderedDict() for", "ImportError: msg = 'rtype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(rtype_,", "ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype =", "shall be included in # all copies or substantial portions of the Software.", "'{0}. Wrong function name {1}. {2} expected.'.format( errormsg, data.__name__, self.name ) ) params,", "def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj,", "= type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result = None if", "import compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from", "if self.rtype is not None and type(self.rtype) != type(rtype): raise TypeError( '{0}. 
Wrong", "None else len(default)) params = OrderedDict() for index, arg in enumerate(args): pkwargs =", "scope=scope) else: raise except ImportError: msg = 'Error on ptype \"{0}\" ({1}) from", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "kwargs: kwargs['name'] = resname for attrname in dir(_resource): if ( attrname and attrname[0]", "ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default), update self ref when default", "'ParamSchema' ] class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__ = 'python'", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "for ptype in ptypes: ptype = ptype.strip() islist = False try: lkptype =", "= build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter", "type _cls: class to decorate. :param kwargs: schema attributes to set. 
:rtype: type", "def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value:", "pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params =", "schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if", "rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope )", "'' impltype = '' safe = False varargs = False def _validate(self, data,", "True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of", "..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType,", ") val = param.default if isinstance(val, DynamicValue): val = val() if ( val", "schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0]", "(not var) and len(params) != len(self.params): raise TypeError( '{0}. 
Wrong param length: {1}.", "== 'default': if self.autotype and self.ref is None: self.ref = None if value", "DynamicValue from .factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema", "= pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name != name: raise", ") ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs", "if ( attrname and attrname[0] != '_' and attrname not in kwargs and", "pschema return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs)", "if pname and pname in params: ptype = (match[0] or match[3]).strip() ptypes =", "SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import updatecontent,", "selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()): name =", "schema from the decorate class. :param type _cls: class to decorate. :param kwargs:", "to transform a function into a schema.\"\"\" if default is None: return lambda", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "set. :rtype: type :return: schema class. \"\"\" if _cls is None: return lambda", "False varargs = False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args,", "data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType", "_PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__", "BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype", "type of {0}. 
{1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\"", "!= name: raise TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format( errormsg,", "else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None # parse", "raise except ImportError: msg = 'rtype \"{0}\" ({1}) from {2} not found.' raise", "\"\"\"Function parameter schema.\"\"\" #: if true (default), update self ref when default is", "*args, **kwargs): \"\"\"Decorator to use in order to transform a function into a", "= pkwargs rtype = None # parse docstring if function.__doc__ is not None", "( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six import get_function_globals from", "def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs) if data !=", "for attrname in dir(_resource): if ( attrname and attrname[0] != '_' and attrname", "= '' safe = False varargs = False def _validate(self, data, owner, *args,", "# in the Software without restriction, including without limitation the rights # to", "**kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result", "not None and default is not None and val != default ): raise", "params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = [] for", "raise TypeError( '{0}. Wrong rtype {1}. 
{2} expected.'.format( rtype, self.rtype ) ) for", "key, val) params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl = str(getsourcelines(value))", "params[arg] = pkwargs rtype = None # parse docstring if function.__doc__ is not", "expected'.format(_resource) ) if issubclass(_resource, Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False)", "funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order to transform a function into", "class ParamTypeSchema(Schema): \"\"\"In charge of embedding a parameter type which met a problem", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "islist = False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True", "] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype = ''", "kwargs) return result def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for", "schemas = [] for rtype_ in rtypes: rtype_ = rtype_.strip() islist = False", "if (not var) and len(params) != len(self.params): raise TypeError( '{0}. Wrong param length:", "'mandatory': True } # param kwargs if index >= indexlen: # has default", "indexlen = len(args) - (0 if default is None else len(default)) params =", "from {2} not found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls =", ":rtype: type :return: schema class. \"\"\" if _cls is None: return lambda _cls:", "is given. autotype = True mandatory = True #: if true (default), parameter", "= lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error on ptype \"{0}\"", "MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl =", "self.varargs or vargs or kwargs if (not var) and len(params) != len(self.params): raise", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "= val() if ( val is not None and default is not None", "obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault", "lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype = lookup(", "self.impltype = 'python' try: self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod", "if true (default), parameter value is mandatory. def _setvalue(self, schema, value, *args, **kwargs):", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "key in pkwarg: val = pkwarg[key] if val is not None: setattr(selfparam, key,", "= [] for rtype_ in rtypes: rtype_ = rtype_.strip() islist = False try:", "= (match[0] or match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype in", "*args, **kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "rrtype: rtypes = rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_ =", "= schemas[0] continue pname = (match[1] or match[2]).strip() if pname and pname in", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "is not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. Dedicated to describe", "{1}. {2} expected.'.format( errormsg, len(params), len(self.params) ) ) if self.rtype is not None", "safe = False varargs = False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self,", "kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if (not var)", "Permission is hereby granted, free of charge, to any person obtaining a copy", "# parse docstring if function.__doc__ is not None and not isbuiltin(function): scope =", "function and rtype. 
:return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\" try:", "scope=scope) except ImportError: islist = True try: if ptype[-1] == 's': lkptype =", "{1}. {2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()): name", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "Software without restriction, including without limitation the rights # to use, copy, modify,", "= self.params[index] if param.name != name: raise TypeError( '{0}. Wrong param {1} at", "self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args, **kwargs)", "# The above copyright notice and this permission notice shall be included in", "# of this software and associated documentation files (the \"Software\"), to deal #", "\"\"\"Class decorator used to build a schema from the decorate class. :param type", "result is None: resname = _resource.__name__ if 'name' not in kwargs: kwargs['name'] =", "= datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if self.autotype and self.ref", "isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return result def", "(), () indexlen = len(args) - (0 if default is None else len(default))", "val is not None and default is not None and val != default", "_PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC", "pkwarg[key] if val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params", "self.params: selfparams[selfparam.name] = selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()): name", "Schema): result = _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None:", "{ 'name': arg, 'mandatory': True } # param kwargs if index >= indexlen:", "self.ref is None: self.ref = None if value is None else data2schema(value) if", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "if data != self.default or data is not self.default: errormsg = 'Error while", ") if self.rtype is not None and type(self.rtype) != type(rtype): raise TypeError( '{0}.", "> 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue pname =", "and rtype. :return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\" try: args,", "def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for mro in schemacls.mro():", "Wrong param {1} at {2}. 
{3} expected.'.format( errormsg, name, index, param.name ) )", "vargs or kwargs params = [] selfparams = {} for selfparam in self.params:", "= pkwarg[key] if val is not None: setattr(selfparam, key, val) params.append(selfparam) self.params =", "self param if name in selfparams: selfparam = selfparams[name] if selfparam is None:", "except TypeError: result = func(*args, **kwargs) return result result.source = func return result", "rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj,", "r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [", "= ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype", "decorator used to build a schema from the decorate class. :param type _cls:", "schemas utilities.\"\"\" from re import compile as re_compile from b3j0f.utils.version import OrderedDict from", "def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args, **kwargs) if not isinstance(data, self.type):", "for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or None", "= schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "old self param if name in selfparams: selfparam = selfparams[name] if selfparam is", "r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC,", "if result is None: resname = _resource.__name__ if 'name' not in kwargs: kwargs['name']", "or kwargs params = [] selfparams = {} for selfparam in self.params: selfparams[selfparam.name]", 
"'{0}. Wrong val {1}/{2} at {3}. Expected {4}.'.format( errormsg, name, default, index, val", "not found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except", "**kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj,", "python classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs): if not isclass(_resource): raise", "and self.ref is None: self.ref = None if value is None else data2schema(value)", "is not None and val != default ): raise TypeError( '{0}. Wrong val", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "continue pname = (match[1] or match[2]).strip() if pname and pname in params: ptype", "self.impl = str(getsourcelines(value)) except TypeError: self.impl = '' @classmethod def _getparams_rtype(cls, function): \"\"\"Get", "updatecontent, data2schema, datatype2schemacls, RefSchema from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType,", "hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result = mro break", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "TypeError( '{0}. Wrong rtype {1}. 
{2} expected.'.format( rtype, self.rtype ) ) for index,", "from types import ( FunctionType, MethodType, LambdaType, BuiltinFunctionType, BuiltinMethodType, MemberDescriptorType ) from six", "pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old self param", "of this software and associated documentation files (the \"Software\"), to deal # in", "rtype, self.rtype ) ) for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "self.ref = None if value is None else data2schema(value) if value is not", "except ImportError: msg = 'Error on ptype \"{0}\" ({1}) from {2} not found.'", "and attrname not in kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource,", ":param type _cls: class to decorate. :param kwargs: schema attributes to set. :rtype:", "OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs,", "function): \"\"\"Get function params from input function and rtype. 
:return: OrderedDict, rtype, vargs", "# The MIT License (MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> #", "'s': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope)", ") ) for index, pkwargs in enumerate(params.values()): name = pkwargs['name'] default = pkwargs.get('default')", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__", "of building python classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs): if not", "= (match[1] or match[2]).strip() if pname and pname in params: ptype = (match[0]", "isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource, Schema): result", "value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault =", "None if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result =", "index >= indexlen: # has default value value = default[index - indexlen] pkwargs['default']", "not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return result", "rtype_.strip() islist = False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist =", "schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs):", "default, index, val ) ) def _setvalue(self, schema, value): if schema.name == 'default':", "ElementarySchema._validate(self, data=data, *args, **kwargs) if data != self.default or data is not self.default:", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema =", "the 
Software without restriction, including without limitation the rights # to use, copy,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "person obtaining a copy # of this software and associated documentation files (the", "- (0 if default is None else len(default)) params = OrderedDict() for index,", "self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype, vargs, kwargs =", "build(self, _resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource)", "class FunctionSchema(ElementarySchema): \"\"\"Function schema. Dedicated to describe functions, methods and lambda objects. \"\"\"", "above copyright notice and this permission notice shall be included in # all", "(MIT) # # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby", "which met a problem while generating a schema.\"\"\" type = TypeSchema() def _validate(self,", "persons to whom the Software is # furnished to do so, subject to", "selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key] if val", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "OR OTHER DEALINGS IN THE # SOFTWARE. # -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\"", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# --------------------------------------------------------------------", "to use in order to transform a function into a schema.\"\"\" if default", "datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname = _resource.__name__ if 'name' not in", "parameter type which met a problem while generating a schema.\"\"\" type = TypeSchema()", "value is not None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. Dedicated to", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam", "var) and len(params) != len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2}", ") ) if self.rtype is not None and type(self.rtype) != type(rtype): raise TypeError(", "notice and this permission notice shall be included in # all copies or", "of charge, to any person obtaining a copy # of this software and", "= False params[arg] = pkwargs rtype = None # parse docstring if function.__doc__", "varargs = False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data, *args, **kwargs)", "'{0}. Wrong rtype {1}. 
{2} expected.'.format( rtype, self.rtype ) ) for index, pkwargs", "rtype_.startswith('list of '): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except ImportError:", "pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def __call__(self,", "try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True try: if rtype_[-1]", "self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params = []", "lookup from ..base import Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary", "hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj, value,", "not self.default: errormsg = 'Error while validating {0} with {1}'.format(data, self) if data.__name__", "of {0}. {1} expected.'.format(data, self.type) ) @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #:", "true (default), update self ref when default is given. autotype = True mandatory", "value is mandatory. def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args,", "(?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE))", "for key in pkwarg: val = pkwarg[key] if val is not None: setattr(selfparam,", "= selfparam index = 0 for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name']", "included in # all copies or substantial portions of the Software. # #", "pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None # parse docstring if", "from input function and rtype. :return: OrderedDict, rtype, vargs and kwargs. 
:rtype: tuple", "= attr result = type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result", "match[3]).strip() ptypes = ptype.split(',') schemas = [] for ptype in ptypes: ptype =", "\"\"\" try: args, vargs, kwargs, default = getargspec(function) except TypeError: args, vargs, kwargs,", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "and associated documentation files (the \"Software\"), to deal # in the Software without", "lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg = 'rtype \"{0}\" ({1})", "_cls is None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return", "when default is given. autotype = True mandatory = True #: if true", "@classmethod def _getparams_rtype(cls, function): \"\"\"Get function params from input function and rtype. :return:", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "() indexlen = len(args) - (0 if default is None else len(default)) params", "a schema.\"\"\" type = TypeSchema() def _validate(self, data, *args, **kwargs): super(ParamTypeSchema, self)._validate(data=data, *args,", "setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype = 'python' try: self.impl =", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class ParamTypeSchema(Schema): \"\"\"In", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls = ParamTypeSchema(type=lkptype)", "pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else:", "= schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def __call__(self, *args,", "order to transform a function 
into a schema.\"\"\" if default is None: return", "on ptype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(pname, ptype, function)", "lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg = 'rtype", "= None # old self param if name in selfparams: selfparam = selfparams[name]", "_getparams_rtype(cls, function): \"\"\"Get function params from input function and rtype. :return: OrderedDict, rtype,", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "[] for ptype in ptypes: ptype = ptype.strip() islist = False try: lkptype", "(default), parameter value is mandatory. def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema,", "# -*- coding: utf-8 -*- # -------------------------------------------------------------------- # The MIT License (MIT) #", "in order to transform a function into a schema.\"\"\" if default is None:", "data is not self.default: errormsg = 'Error while validating {0} with {1}'.format(data, self)", "ptype = ptype.strip() islist = False try: lkptype = lookup(ptype, scope=scope) except ImportError:", "= ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype = '' safe =", "selfparam = selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg) else: for key", "None: self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. 
Dedicated to describe functions, methods", "attr result = type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result =", "= mro break return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build", "# -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import compile as re_compile from", "True try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope ) elif", "ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype =", "None else data2schema(value) if value is not None: self.mandatory = False class FunctionSchema(ElementarySchema):", "): raise TypeError( '{0}. Wrong val {1}/{2} at {3}. Expected {4}.'.format( errormsg, name,", "( attrname and attrname[0] != '_' and attrname not in kwargs and not", "'s': lkrtype = lookup( rtype_[:-1], scope=scope ) elif rtype_.startswith('list of '): lkrtype =", "in kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname) if not", "ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key] if val is not", "pkwargs.get('default') param = self.params[index] if param.name != name: raise TypeError( '{0}. Wrong param", "in # all copies or substantial portions of the Software. # # THE", "( val is not None and default is not None and val !=", "if value is None else data2schema(value) if value is not None: self.mandatory =", "input function and rtype. :return: OrderedDict, rtype, vargs and kwargs. :rtype: tuple \"\"\"", "THE # SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import compile", "ImportError: islist = True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope)", "return result def buildschema(_cls=None, **kwargs): \"\"\"Class decorator used to build a schema from", "tuple \"\"\" try: args, vargs, kwargs, default = getargspec(function) except TypeError: args, vargs,", "_setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value,", "= r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC =", "is None: rrtype = match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas", "= False varargs = False def _validate(self, data, owner, *args, **kwargs): ElementarySchema._validate(self, data=data,", "param.name ) ) val = param.default if isinstance(val, DynamicValue): val = val() if", "= param.default if isinstance(val, DynamicValue): val = val() if ( val is not", "import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__ = [ 'PythonSchemaBuilder',", "name = pkwargs['name'] default = pkwargs.get('default') param = self.params[index] if param.name != name:", "rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else:", "params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return", "free of charge, to any person obtaining a copy # of this software", "functions, methods and lambda objects. 
\"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE =", "re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base import Schema,", "OrderedDict from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from .factory import", "len(self.params): raise TypeError( '{0}. Wrong param length: {1}. {2} expected.'.format( errormsg, len(params), len(self.params)", "ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = '' impltype = '' safe = False", "LambdaType, BuiltinFunctionType, BuiltinMethodType ] params = ArraySchema(itemtype=ParamSchema()) rtype = Schema() impl = ''", "lkptype = lookup(ptype[8:], scope=scope) else: raise except ImportError: msg = 'Error on ptype", "if not isinstance(data, self.type): raise TypeError( 'Wrong type of {0}. {1} expected.'.format(data, self.type)", "USE OR OTHER DEALINGS IN THE # SOFTWARE. # -------------------------------------------------------------------- \"\"\"Python language schemas", "msg = 'rtype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(rtype_, rrtype,", "**kwargs) return result result.source = func return result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator", "= False try: lkrtype = lookup(rtype_, scope=scope) except ImportError: islist = True try:", "class PythonSchemaBuilder(SchemaBuilder): \"\"\"In charge of building python classes.\"\"\" __name__ = 'python' def build(self,", "if hasattr(schemacls, 'mro'): for mro in schemacls.mro(): if issubclass(mro, Schema): result = mro", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "to build a schema from the decorate class. :param type _cls: class to", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "(?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ = [ FunctionType,", "kwargs params = [] selfparams = {} for selfparam in self.params: selfparams[selfparam.name] =", "or match[2]).strip() if pname and pname in params: ptype = (match[0] or match[3]).strip()", ".factory import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils", "name: raise TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format( errormsg, name,", "Software, and to permit persons to whom the Software is # furnished to", "'_' and attrname not in kwargs and not hasattr(Schema, attrname) ): attr =", "attrname not in kwargs and not hasattr(Schema, attrname) ): attr = getattr(_resource, attrname)", "in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or None if rrtype:", "if ( val is not None and default is not None and val", "'rtype \"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(rtype_, rrtype, function) )", "is None: selfparam = ParamSchema(**pkwarg) else: for key in pkwarg: val = pkwarg[key]", "= r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE, _RTYPE)) __data_types__ =", "data.__name__, self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs", "= True #: if true (default), parameter value is mandatory. def _setvalue(self, schema,", "scope=scope ) else: raise except ImportError: msg = 'rtype \"{0}\" ({1}) from {2}", "raise TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected {4}.'.format( errormsg, name, default,", ") params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or", "self.default: errormsg = 'Error while validating {0} with {1}'.format(data, self) if data.__name__ !=", "_cls: class to decorate. :param kwargs: schema attributes to set. :rtype: type :return:", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "**kwargs) pkwargs, self.rtype, vargs, kwargs = self._getparams_rtype(value) self.vargs = vargs or kwargs params", "= lookup(rtype_, scope=scope) except ImportError: islist = True try: if rtype_[-1] == 's':", "self.autotype and self.ref is None: self.ref = None if value is None else", "errormsg, len(params), len(self.params) ) ) if self.rtype is not None and type(self.rtype) !=", "lookup(ptype[:-1], scope=scope) elif ptype.startswith('list of '): lkptype = lookup(ptype[8:], scope=scope) else: raise except", "and lambda objects. \"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)'", "**kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if self.autotype and", "= None # parse docstring if function.__doc__ is not None and not isbuiltin(function):", "raise ImportError( msg.format(pname, ptype, function) ) try: schemacls = datatype2schemacls(lkptype) except TypeError: schemacls", "OTHER DEALINGS IN THE # SOFTWARE. # -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from", "datatype2schemacls(lkrtype) except TypeError: schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema =", "use in order to transform a function into a schema.\"\"\" if default is", "given. 
autotype = True mandatory = True #: if true (default), parameter value", "> 1: pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema", "= get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip()", "self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault", "ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) >", "param {1} at {2}. {3} expected.'.format( errormsg, name, index, param.name ) ) val", "schemacls = ParamTypeSchema(type=lkrtype) rschema = schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if", "in the Software without restriction, including without limitation the rights # to use,", "problem while generating a schema.\"\"\" type = TypeSchema() def _validate(self, data, *args, **kwargs):", "errormsg, data.__name__, self.name ) ) params, rtype, vargs, kwargs = self._getparams_rtype(function=data) var =", "\"{0}\" ({1}) from {2} not found.' raise ImportError( msg.format(pname, ptype, function) ) try:", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if", "Wrong val {1}/{2} at {3}. Expected {4}.'.format( errormsg, name, default, index, val )", "value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value, *args, **kwargs) if schema.name == 'default': if", "is value: return self.olddefault = value ElementarySchema._setter(self, obj, value, *args, **kwargs) pkwargs, self.rtype,", "else: pschema = schemas[0] params[pname]['ref'] = pschema return params, rtype, vargs, kwargs def", "raise TypeError( '{0}. Wrong param length: {1}. 
{2} expected.'.format( errormsg, len(params), len(self.params) )", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "return result def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for mro", "if hasattr(self, 'olddefault'): if self.olddefault is value: return self.olddefault = value ElementarySchema._setter(self, obj,", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "[] selfparams = {} for selfparam in self.params: selfparams[selfparam.name] = selfparam index =", "schemacls() if islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype =", "'name' not in kwargs: kwargs['name'] = resname for attrname in dir(_resource): if (", "import SchemaBuilder, build from ..elementary import ElementarySchema, ArraySchema, OneOfSchema, TypeSchema from ..utils import", "islist = True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif", "found.' raise ImportError( msg.format(rtype_, rrtype, function) ) try: schemacls = datatype2schemacls(lkrtype) except TypeError:", "and/or sell # copies of the Software, and to permit persons to whom", "raise TypeError( '{0}. Wrong param {1} at {2}. {3} expected.'.format( errormsg, name, index,", "rtype, vargs, kwargs = self._getparams_rtype(function=data) var = self.varargs or vargs or kwargs if", "to set. :rtype: type :return: schema class. \"\"\" if _cls is None: return", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python language", "= self.varargs or vargs or kwargs if (not var) and len(params) != len(self.params):", "return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self, obj, *args,", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "is not self.default: errormsg = 'Error while validating {0} with {1}'.format(data, self) if", "params from input function and rtype. :return: OrderedDict, rtype, vargs and kwargs. :rtype:", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. #", "compile as re_compile from b3j0f.utils.version import OrderedDict from b3j0f.utils.path import lookup from ..base", "= OrderedDict() for index, arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory':", "return params, rtype, vargs, kwargs def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def", "inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__ = [", "-*- # -------------------------------------------------------------------- # The MIT License (MIT) # # Copyright (c) 2016", "attr = getattr(_resource, attrname) if not isinstance(attr, MemberDescriptorType): kwargs[attrname] = attr result =", "# # The above copyright notice and this permission notice shall be included", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "parameter value is mandatory. def _setvalue(self, schema, value, *args, **kwargs): super(ParamSchema, self)._setvalue(schema, value,", "objects. \"\"\" _PDESC = r':param (?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE =", "if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) > 1: pschema = OneOfSchema(schemas=schemas,", "!= self.name: raise TypeError( '{0}. Wrong function name {1}. 
{2} expected.'.format( errormsg, data.__name__,", "= match[4].strip() or None if rrtype: rtypes = rrtype.split(',') schemas = [] for", "rtype_[8:], scope=scope ) else: raise except ImportError: msg = 'rtype \"{0}\" ({1}) from", "..base import Schema, DynamicValue from .factory import SchemaBuilder, build from ..elementary import ElementarySchema,", "@updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default), update self ref", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "cls._REC.findall(function.__doc__): if rtype is None: rrtype = match[4].strip() or None if rrtype: rtypes", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "except TypeError: schemacls = ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema)", "ParamTypeSchema(type=lkptype) pschema = schemacls() if islist: pschema = ArraySchema(itemtype=pschema) schemas.append(pschema) if len(ptypes) >", "vargs or kwargs if (not var) and len(params) != len(self.params): raise TypeError( '{0}.", "None and not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype", "'{0}. Wrong param length: {1}. 
{2} expected.'.format( errormsg, len(params), len(self.params) ) ) if", "result = type(resname, (Schema,), kwargs) return result def getresource(self, schemacls): result = None", "result def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'): for mro in", "(?P<ptype1>[\\w_,]+) (?P<pname1>\\w+):' _PTYPE = r':type (?P<pname2>[\\w_]+):(?P<ptype2>[^\\n]+)' _RTYPE = r':rtype:(?P<rtype>[^\\n]+)' _REC = re_compile('{0}|{1}|{2}'.format(_PDESC, _PTYPE,", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "self._getparams_rtype(value) self.vargs = vargs or kwargs params = [] selfparams = {} for", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "val = param.default if isinstance(val, DynamicValue): val = val() if ( val is", "params = [] selfparams = {} for selfparam in self.params: selfparams[selfparam.name] = selfparam", "None else data2schema(value) pkwargs['mandatory'] = False params[arg] = pkwargs rtype = None #", "from b3j0f.utils.path import lookup from ..base import Schema, DynamicValue from .factory import SchemaBuilder,", "rrtype.split(',') schemas = [] for rtype_ in rtypes: rtype_ = rtype_.strip() islist =", "at {2}. {3} expected.'.format( errormsg, name, index, param.name ) ) val = param.default", "at {3}. Expected {4}.'.format( errormsg, name, default, index, val ) ) def _setvalue(self,", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "index, arg in enumerate(args): pkwargs = { 'name': arg, 'mandatory': True } #", "do so, subject to the following conditions: # # The above copyright notice", "= None if value is None else data2schema(value) pkwargs['mandatory'] = False params[arg] =", "= func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs) return result result.source", "function params from input function and rtype. 
:return: OrderedDict, rtype, vargs and kwargs.", "for index, pkwarg in enumerate(pkwargs.values()): name = pkwarg['name'] selfparam = None # old", "permit persons to whom the Software is # furnished to do so, subject", "pschema = OneOfSchema(schemas=schemas, nullable=True) else: pschema = schemas[0] params[pname]['ref'] = pschema return params,", "else: raise except ImportError: msg = 'Error on ptype \"{0}\" ({1}) from {2}", "is not None and type(self.rtype) != type(rtype): raise TypeError( '{0}. Wrong rtype {1}.", "= _resource else: result = datatype2schemacls(_datatype=_resource, _force=False) if result is None: resname =", "is not None: setattr(selfparam, key, val) params.append(selfparam) self.params = params self.impltype = 'python'", "{4}.'.format( errormsg, name, default, index, val ) ) def _setvalue(self, schema, value): if", "\"\"\"Decorator to use in order to transform a function into a schema.\"\"\" if", "# # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted,", "TypeError( '{0}. Wrong val {1}/{2} at {3}. 
Expected {4}.'.format( errormsg, name, default, index,", "building python classes.\"\"\" __name__ = 'python' def build(self, _resource, **kwargs): if not isclass(_resource):", "from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps __all__ =", "is None else len(default)) params = OrderedDict() for index, arg in enumerate(args): pkwargs", "None: return lambda _cls: buildschema(_cls=_cls, **kwargs) result = build(_cls, **kwargs) return result class", "(Schema,), kwargs) return result def getresource(self, schemacls): result = None if hasattr(schemacls, 'mro'):", "OneOfSchema, TypeSchema from ..utils import updatecontent, data2schema, datatype2schemacls, RefSchema from types import (", "language schemas utilities.\"\"\" from re import compile as re_compile from b3j0f.utils.version import OrderedDict", "**kwargs): try: result = func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs)", "= True mandatory = True #: if true (default), parameter value is mandatory.", "{1} at {2}. {3} expected.'.format( errormsg, name, index, param.name ) ) val =", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS", "_resource.__name__ if 'name' not in kwargs: kwargs['name'] = resname for attrname in dir(_resource):", "*args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func = ElementarySchema._getter(self,", "schema.\"\"\" if default is None: return lambda default: funcschema(default=default, *args, **kwargs) return FunctionSchema(default=default,", "= True try: if ptype[-1] == 's': lkptype = lookup(ptype[:-1], scope=scope) elif ptype.startswith('list", "pname and pname in params: ptype = (match[0] or match[3]).strip() ptypes = ptype.split(',')", "in schemacls.mro(): if issubclass(mro, Schema): result = mro break return result def buildschema(_cls=None,", "pkwargs rtype = None # parse docstring if function.__doc__ is not None and", "def __call__(self, *args, **kwargs): return self.default(*args, **kwargs) def _getter(self, obj, *args, **kwargs): func", "try: result = func(obj, *args, **kwargs) except TypeError: result = func(*args, **kwargs) return", "charge of embedding a parameter type which met a problem while generating a", "not None and val != default ): raise TypeError( '{0}. Wrong val {1}/{2}", "if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args, **kwargs): if", "if 'name' not in kwargs: kwargs['name'] = resname for attrname in dir(_resource): if", "# SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import compile as", "kwargs, default = (), (), (), () indexlen = len(args) - (0 if", "index, param.name ) ) val = param.default if isinstance(val, DynamicValue): val = val()", "not None and not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "= {} for selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0 for", "(), (), () indexlen = len(args) - (0 if default is None else", "val ) ) def _setvalue(self, schema, value): if schema.name == 'default': self._setter(obj=self, value=value)", "value = default[index - indexlen] pkwargs['default'] = value pkwargs['ref'] = None if value", "schema. Dedicated to describe functions, methods and lambda objects. \"\"\" _PDESC = r':param", "val {1}/{2} at {3}. Expected {4}.'.format( errormsg, name, default, index, val ) )", "_setter(self, obj, value, *args, **kwargs): if hasattr(self, 'olddefault'): if self.olddefault is value: return", "_resource, **kwargs): if not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) )", "import get_function_globals from inspect import getargspec, getsourcelines, isclass, isbuiltin from functools import wraps", "rtype, vargs and kwargs. :rtype: tuple \"\"\" try: args, vargs, kwargs, default =", ") @updatecontent class ParamSchema(RefSchema): \"\"\"Function parameter schema.\"\"\" #: if true (default), update self", "islist: rschema = ArraySchema(itemtype=rschema) schemas.append(rschema) if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True)", "type(rtype): raise TypeError( '{0}. Wrong rtype {1}. {2} expected.'.format( rtype, self.rtype ) )", "self.mandatory = False class FunctionSchema(ElementarySchema): \"\"\"Function schema. 
Dedicated to describe functions, methods and", "not isbuiltin(function): scope = get_function_globals(function) for match in cls._REC.findall(function.__doc__): if rtype is None:", "func = ElementarySchema._getter(self, obj, *args, **kwargs) @wraps(func) def result(*args, **kwargs): try: result =", "selfparams = {} for selfparam in self.params: selfparams[selfparam.name] = selfparam index = 0", "kwargs[attrname] = attr result = type(resname, (Schema,), kwargs) return result def getresource(self, schemacls):", "errormsg, name, default, index, val ) ) def _setvalue(self, schema, value): if schema.name", "arg, 'mandatory': True } # param kwargs if index >= indexlen: # has", "= lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg = 'rtype \"{0}\"", "data=data, *args, **kwargs) if data != self.default or data is not self.default: errormsg", "if not isclass(_resource): raise TypeError( 'Wrong type {0}, \\'type\\' expected'.format(_resource) ) if issubclass(_resource,", "hereby granted, free of charge, to any person obtaining a copy # of", "result def funcschema(default=None, *args, **kwargs): \"\"\"Decorator to use in order to transform a", "SOFTWARE. 
# -------------------------------------------------------------------- \"\"\"Python language schemas utilities.\"\"\" from re import compile as re_compile", "name in selfparams: selfparam = selfparams[name] if selfparam is None: selfparam = ParamSchema(**pkwarg)", "if len(rtypes) > 1: rtype = OneOfSchema(schemas=schemas, nullable=True) else: rtype = schemas[0] continue", "= True try: if rtype_[-1] == 's': lkrtype = lookup( rtype_[:-1], scope=scope )", "schema, value): if schema.name == 'default': self._setter(obj=self, value=value) def _setter(self, obj, value, *args,", "of '): lkrtype = lookup( rtype_[8:], scope=scope ) else: raise except ImportError: msg", "not in kwargs: kwargs['name'] = resname for attrname in dir(_resource): if ( attrname", "None # old self param if name in selfparams: selfparam = selfparams[name] if" ]
[ "from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1)", "coding: utf-8 # Copyright (c) Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner", "python # coding: utf-8 # Copyright (c) Qotto, 2019 import uuid import pytest", "uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key():", "statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2,", "[0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber):", "2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber", "tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100):", "2, 3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100)", "in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3])", "= StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3],", "range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3]) ==", "test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with 
pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1,", "def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0,", "import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner", "3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with", "i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2,", "statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2,", "test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1,", "Copyright (c) Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from", "import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i", "2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0,", "== 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2,", "# Copyright (c) Qotto, 
2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner", "#!/usr/bin/env python # coding: utf-8 # Copyright (c) Qotto, 2019 import uuid import", "StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0,", "[0, 1, 2, 3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner", "statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance():", "from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0,", "def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0,", "import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def", "1, 2, 3], [0, 1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner =", "OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex,", "assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3]) == 1 def", "StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in", "pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): 
statefulset_partitioner =", "1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3],", "import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for i in range(0, 100): assert", "3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1,", "# coding: utf-8 # Copyright (c) Qotto, 2019 import uuid import pytest from", "(c) Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors", "Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import", "tonga.services.coordinator.partitioner.statefulset_partitioner import StatefulsetPartitioner from tonga.errors import OutsideInstanceNumber def test_statefulset_partitioner_with_str_uuid_key(): statefulset_partitioner = StatefulsetPartitioner(instance=1) for", "for i in range(0, 100): assert statefulset_partitioner(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1,", "1, 2, 3]) == 1 def test_statefulset_partitioner_bad_instance(): statefulset_partitioner = StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex,", "= StatefulsetPartitioner(instance=100) with pytest.raises(OutsideInstanceNumber): statefulset_partitioner.__call__(uuid.uuid4().hex, [0, 1, 2, 3], [0, 1, 2, 3])", "utf-8 # Copyright (c) Qotto, 2019 import uuid import pytest from tonga.services.coordinator.partitioner.statefulset_partitioner import", "100): assert statefulset_partitioner(uuid.uuid4().hex, 
[0, 1, 2, 3], [0, 1, 2, 3]) == 1" ]
[]
[ "the first argument, anything else # will be passed as payloads for the", "\"\"\" Base class for decorators in Eris. \"\"\" # The interface for hooks", "module. Contains various decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class", "decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for decorators in", "class for decorators in Eris. \"\"\" # The interface for hooks means that", "# The interface for hooks means that events will always be the first", "\"\"\" # The interface for hooks means that events will always be the", "will always be the first argument, anything else # will be passed as", "<filename>eris/decorators/__init__.py \"\"\" Decorator module. Contains various decorators for hook callbacks. \"\"\" class BaseDecorator:", "\"\"\" Decorator module. Contains various decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\"", "class BaseDecorator: \"\"\" Base class for decorators in Eris. \"\"\" # The interface", "argument, anything else # will be passed as payloads for the events. _EVENT_OFFSET:", "hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for decorators in Eris. \"\"\"", "The interface for hooks means that events will always be the first argument,", "events will always be the first argument, anything else # will be passed", "decorators in Eris. \"\"\" # The interface for hooks means that events will", "for decorators in Eris. \"\"\" # The interface for hooks means that events", "Eris. \"\"\" # The interface for hooks means that events will always be", "always be the first argument, anything else # will be passed as payloads", "else # will be passed as payloads for the events. _EVENT_OFFSET: int =", "callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for decorators in Eris. \"\"\" #", "for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for decorators in Eris.", "that events will always be the first argument, anything else # will be", "in Eris. 
\"\"\" # The interface for hooks means that events will always", "\"\"\" class BaseDecorator: \"\"\" Base class for decorators in Eris. \"\"\" # The", "first argument, anything else # will be passed as payloads for the events.", "Contains various decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for", "# will be passed as payloads for the events. _EVENT_OFFSET: int = 1", "hooks means that events will always be the first argument, anything else #", "Base class for decorators in Eris. \"\"\" # The interface for hooks means", "for hooks means that events will always be the first argument, anything else", "be the first argument, anything else # will be passed as payloads for", "BaseDecorator: \"\"\" Base class for decorators in Eris. \"\"\" # The interface for", "means that events will always be the first argument, anything else # will", "anything else # will be passed as payloads for the events. _EVENT_OFFSET: int", "various decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base class for decorators", "interface for hooks means that events will always be the first argument, anything", "Decorator module. Contains various decorators for hook callbacks. \"\"\" class BaseDecorator: \"\"\" Base" ]
[ "kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid',", "layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to evaluate the", "= keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters=", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1),", "expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x =", "x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features =", "keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation", "as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import", "from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def", "import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\"", "x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = 
layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108,", "= keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x)", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu',", "keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def", "x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs=", "output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x =", "def tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input", "x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x =", "to evaluate the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'),", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) 
conv_dense", "kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)", "= keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1),", "sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5),", "= keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters=", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)", "keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, 
depth_features])", "tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x", "keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics", "= keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred)", "activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input =", "seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001),", "\"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc',", "tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input =", "kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x", "name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): 
if output_bias is not None:", "= keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid',", "kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input],", "keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu',", "'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x", "output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics= metrics) return model", "keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is not", "keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics,", "'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x)", "tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to", "= keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = 
keras.layers.MaxPooling2D(pool_size=(2,1))(x)", "\"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is", "keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias=", "= keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer=", "keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu',", "= keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x", "x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1),", "architecture \"\"\" import tensorflow as tf from tensorflow import keras from tensorflow.keras import", "activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model", "x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features =", "#!/usr/bin/python \"\"\" TempMAGE model architecture \"\"\" import tensorflow as tf from tensorflow import", "perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), 
keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'),", "seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x =", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu',", "name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input)", "keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid',", "x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1,", "used to evaluate the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'),", "x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x =", "x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x =", "import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from", 
"evaluate the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'),", "= 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)", "output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu',", "= keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x)", "model architecture \"\"\" import tensorflow as tf from tensorflow import keras from tensorflow.keras", "keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS", "keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x", "keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense,", "name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x =", 
"keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128,", "= layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x", "x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x =", "activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics= metrics) return", "the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'),", "kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x", "= keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters=", "keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features", "name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), 
padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x)", "is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32,", "= layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name=", "depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x)", "keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x =", "x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x =", "= \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias", "= layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x)", "x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128,", "= keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features,", "64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = 
keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2),", "= keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1),", "data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred =", "keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64,", "\"\"\" import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers", "depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features", "'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name=", "= 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model =", "kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x =", "Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to evaluate the model's perfromance \"\"\" METRICS", "if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x", 
"layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression')", "data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation", "depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input)", "= keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation =", "weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128,", "keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features", "keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ]", ")(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x", "tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to evaluate the model's perfromance", "keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, 
output_bias= None):", "keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid',", "x = layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x =", "keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation =", "output_bias= None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1),", "keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics= metrics)", "= [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve =", "kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features,", "= keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input)", "from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def 
set_metrics(): \"\"\" metrics used to evaluate the model's", "keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x =", "= keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x =", "keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return", "keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x)", "= keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input", "keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy',", "import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to evaluate", "conv_dense = keras.layers.Dense(108, activation = 'relu')(x) 
expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features =", "\"\"\" metrics used to evaluate the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'),", "= 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1),", "expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight')", "= keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense =", "kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth')", "layers.concatenate([conv_dense, data_dense]) x = keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred", "name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation =", "tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics():", "weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense", "from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used", "None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = 
keras.Input(shape=(400,5,1), name='sequence_conv')", "METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve", "set_metrics(): \"\"\" metrics used to evaluate the model's perfromance \"\"\" METRICS = [", "kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu',", "= keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input, weight_input], outputs= seq_pred) model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics=", "kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense =", "def set_metrics(): \"\"\" metrics used to evaluate the model's perfromance \"\"\" METRICS =", "= keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x)", "x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense])", "import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout def set_metrics(): \"\"\" metrics used to evaluate the model's perfromance \"\"\"", "x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = 
keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x)", "keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True,", "kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input =", "not None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5),", "tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D,MaxPool2D,Flatten,Conv1D,MaxPooling1D,Dense,Dropout", "model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'),", "x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64,", "'depth') x = keras.layers.Conv1D(filters= 32, kernel_size=(5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x", 
"keras.layers.Dense(108, activation = 'relu')(x) expression_input = keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input", "'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name=", "return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias =", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x)", "kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid',", "METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias = tf.keras.initializers.Constant(output_bias)", "= tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input)", "x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x =", "keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input = keras.Input(shape=(400,1), name= 'depth') x = keras.layers.Conv1D(filters= 32,", "keras.layers.Flatten()(x) x 
= layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation = 'relu')(x) expression_input =", "= keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x)", "= keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x", "128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x =", "keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features])", "keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005) )(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) sequence_features = keras.layers.Flatten()(x) depth_input", "kernel_size=(10,5), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu',", "tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers", "metrics used to evaluate the model's perfromance \"\"\" METRICS = [ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'),", "\"\"\" TempMAGE model architecture \"\"\" import tensorflow as tf from tensorflow import keras", "32, kernel_size=(5), padding='valid', 
activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(depth_input) x = keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 64, kernel_size=(2),", "weight_features = keras.layers.Flatten()(weight_input) x = layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x", "None: output_bias = tf.keras.initializers.Constant(output_bias) seq_input = keras.Input(shape=(400,5,1), name='sequence_conv') x = keras.layers.Conv2D(filters=32, kernel_size=(10,5), padding='valid',", "keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy')", "= keras.layers.MaxPooling1D(pool_size=(2))(x) depth_features = keras.layers.Flatten()(x) x = layers.concatenate([sequence_features, depth_features]) conv_dense = keras.layers.Dense(108, activation", "= keras.layers.MaxPooling1D(pool_size=(2))(x) x = keras.layers.Conv1D(filters= 128, kernel_size=(2), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling1D(pool_size=(2))(x)", "layers.concatenate([expression_features, weight_features]) data_dense = keras.layers.Dense(20,activation = 'relu')(x) x = layers.concatenate([conv_dense, data_dense]) x =", "keras.layers.Dense(128, activation = 'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x)", "[ keras.metrics.TruePositives(name='tp'), keras.metrics.FalsePositives(name='fp'), keras.metrics.TrueNegatives(name='tn'), keras.metrics.FalseNegatives(name='fn'), 
keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='ROC_auc'), keras.metrics.AUC(name='PR_auc', curve = \"PR\"),", "= keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features", "keras.Input(shape=(20,1), name= 'expression') expression_features = keras.layers.Flatten()(expression_input) weight_input = keras.Input(shape=(1,1), name= 'weight') weight_features =", "curve = \"PR\"), keras.losses.BinaryCrossentropy(from_logits=True, name='binary_crossentropy') ] return METRICS def tempoMAGE(metrics, output_bias= None): if", "TempMAGE model architecture \"\"\" import tensorflow as tf from tensorflow import keras from", "padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=128, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005)", "'relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x = keras.layers.Dropout(0.3)(x) seq_pred = keras.layers.Dense(1, activation='sigmoid',bias_initializer= output_bias)(x) model = keras.Model(inputs=[seq_input,depth_input,expression_input,", "] return METRICS def tempoMAGE(metrics, output_bias= None): if output_bias is not None: output_bias", "activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(seq_input) x = keras.layers.MaxPooling2D(pool_size=(2,1))(x) x = keras.layers.Conv2D(filters=64, kernel_size=(2,1), padding='valid', activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0005))(x) x" ]
[ ":license: Custom BSD, see LICENSE for more details. :email: <EMAIL> \"\"\" import time", "<NAME> :copyright: Northeastern University © 2018. :license: Custom BSD, see LICENSE for more", "2018. :license: Custom BSD, see LICENSE for more details. :email: <EMAIL> \"\"\" import", "~~~~~~~~~~~ Internal file for dummy function checking. :author: <NAME> :copyright: Northeastern University ©", "see LICENSE for more details. :email: <EMAIL> \"\"\" import time import configs.system print", "\"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function checking. :author: <NAME> :copyright: Northeastern", "coding: utf-8 -*- \"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function checking. :author:", "BSD, see LICENSE for more details. :email: <EMAIL> \"\"\" import time import configs.system", "test.py ~~~~~~~~~~~ Internal file for dummy function checking. :author: <NAME> :copyright: Northeastern University", "University © 2018. :license: Custom BSD, see LICENSE for more details. :email: <EMAIL>", "# -*- coding: utf-8 -*- \"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function", "Northeastern University © 2018. :license: Custom BSD, see LICENSE for more details. :email:", "for dummy function checking. :author: <NAME> :copyright: Northeastern University © 2018. :license: Custom", "function checking. :author: <NAME> :copyright: Northeastern University © 2018. :license: Custom BSD, see", "Internal file for dummy function checking. :author: <NAME> :copyright: Northeastern University © 2018.", "file for dummy function checking. :author: <NAME> :copyright: Northeastern University © 2018. :license:", "dummy function checking. :author: <NAME> :copyright: Northeastern University © 2018. :license: Custom BSD,", "checking. :author: <NAME> :copyright: Northeastern University © 2018. :license: Custom BSD, see LICENSE", "for more details. 
:email: <EMAIL> \"\"\" import time import configs.system print configs.system.PROJECT_ROOT time.sleep(100)", ":author: <NAME> :copyright: Northeastern University © 2018. :license: Custom BSD, see LICENSE for", "-*- \"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function checking. :author: <NAME> :copyright:", "-*- coding: utf-8 -*- \"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function checking.", ":copyright: Northeastern University © 2018. :license: Custom BSD, see LICENSE for more details.", "LICENSE for more details. :email: <EMAIL> \"\"\" import time import configs.system print configs.system.PROJECT_ROOT", "© 2018. :license: Custom BSD, see LICENSE for more details. :email: <EMAIL> \"\"\"", "utf-8 -*- \"\"\" test.py ~~~~~~~~~~~ Internal file for dummy function checking. :author: <NAME>", "Custom BSD, see LICENSE for more details. :email: <EMAIL> \"\"\" import time import" ]
[ "def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF)", "def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR)", "P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class", "self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder", "= Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder =", "TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): 
def test_recommender(self):", "def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS) if __name__ == '__main__': unittest.main()", "from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset =", "P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization", "file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase):", "test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class", "= EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object =", "evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) 
recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True)", "validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep", "\"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if", "metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase):", "RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF", "def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train", "class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def", "RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta", "self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def", "import PureSVD from 
RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import", "EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5],", "SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects)", "test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS) if", "import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from", "exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator", "class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class 
UserKNNCFRecommenderTestCase(RecommenderTestCase): def", "BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class):", "= self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test =", "def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD)", "self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS) if __name__", "import Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self):", "class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def", "10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object,", 
"test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class", "RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR", "fit_params = {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True)", "def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF)", "= recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params = {}", "self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase):", "GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class 
BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class", "import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class", "self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase):", "\"\"\" import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from", "def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta)", "= EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder,", "URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params", "test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class", "class 
P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def", "RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet", "Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset", "class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS) if __name__ ==", "else: fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)", "shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class", "-*- coding: utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\" import unittest", "EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) 
class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random)", "self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase):", "TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR", "# -*- coding: utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\" import", "utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\" import unittest import os,", "exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel):", "test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class", "self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase):", "= None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train,", "import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from 
RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from", "from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from", "RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase):", "def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF)", "file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object,", "= evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5],", "URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop)", "self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): 
self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase):", "RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data()", "import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from", "evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train)", "test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class", "FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase):", "class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def", 
"RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha)", "from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import", "NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout", "test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class", "self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase):", "= evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): 
def", "os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object,", "= recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder,", "class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def", "from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from", "common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train =", "SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha", "from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import", "def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE)", "-*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\" import unittest import os, shutil", 
"SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender", "recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params)", "SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization", "isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator =", "{} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object", "setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset)", "temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test", "import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import", "RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def", "recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, 
file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler =", "RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader", "AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF", "= {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\")", "ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase):", "from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD,", "RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS", "self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep +", "import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from", "FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization 
import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import", "recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test)", "test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class", "Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import", "class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def", "22/11/2018 @author: XXX \"\"\" import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop,", "self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params", "def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS)", "os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from", 
"RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter", "test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class", "= {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler", "evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "XXX \"\"\" import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects", "Created on 22/11/2018 @author: XXX \"\"\" import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized", "None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test", "self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True)", "SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF,", "PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): 
self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "\"\"\" Created on 22/11/2018 @author: XXX \"\"\" import unittest import os, shutil from", "@author: XXX \"\"\" import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random,", "recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class", "from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import", "as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from", "= self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else:", "import unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN", "recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM()", "RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD", "from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import 
P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import", "test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class", "AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import", "from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as", "metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator =", "+ \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train)", "import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as", "from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from", "on 22/11/2018 @author: XXX \"\"\" import unittest import os, shutil from 
RecSysFramework.Recommender.NonPersonalized import", "import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from", "recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True)", "SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class", "+ os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object", "def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD)", "import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import", "class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class 
FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def", "evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def", "coding: utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\" import unittest import", "GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM", "class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8,", "recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params =", "Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder()", "URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10}", "def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF)", "EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = 
recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\")", "Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class =", "RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils", "from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel class", "EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter =", "RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender", "test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() +", "RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2,", "class SLIM_RMSERecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def", "class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): 
self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX", "os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM() recommender_object =", "self.common_test_recommender(SLIM_RMSE) class BPRMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase):", "NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(IALS) if __name__ == '__main__':", "RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization", "from RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import", "URM_test=URM_test) recommender_object.save_model(temp_save_file_folder, file_name=\"temp_model\") recommender_object = recommender_class(URM_train) recommender_object.load_model(temp_save_file_folder, file_name=\"temp_model\") evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler", "ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "{\"epochs\": 10} else: fit_params = 
{} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler =", "UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self):", "class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(NMF) class IALSRecommenderTestCase(RecommenderTestCase): def", "unittest import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import", "EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import EarlyStoppingModel", "import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import", "RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD", "self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD) class PureSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(PureSVD) class NMFRecommenderTestCase(RecommenderTestCase):", "self.test = self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + 
os.sep + \"__temp__\"", "def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test =", "ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE", "from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from", "PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout", "BPRMF, FunkSVD, AsySVD from RecSysFramework.Recommender.MatrixFactorization import PureSVD from RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization", "self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\":", "def test_recommender(self): self.common_test_recommender(BPRMF) class FunkSVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(FunkSVD) class AsySVDRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(AsySVD)", "python3 # -*- coding: utf-8 -*- \"\"\" Created on 22/11/2018 @author: XXX \"\"\"", "self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(RP3beta) class SLIM_BPRRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(SLIM_BPR) class SLIM_RMSERecommenderTestCase(RecommenderTestCase):", "RecSysFramework.Recommender.MatrixFactorization import PureSVD from 
RecSysFramework.Recommender.MatrixFactorization import IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator", "RecSysFramework.Recommender.KNN import ItemKNNCF from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM", "from RecSysFramework.Recommender.SLIM.BPR import SLIM as SLIM_BPR from RecSysFramework.Recommender.SLIM.ElasticNet import SLIM as SLIM_RMSE from", "if isinstance(recommender_object, EarlyStoppingModel): fit_params = {\"epochs\": 10} else: fit_params = {} recommender_object.fit(**fit_params) evaluator", "class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def", "as SLIM_RMSE from RecSysFramework.Recommender.GraphBased.P3alphaRecommender import P3alpha from RecSysFramework.Recommender.GraphBased.RP3betaRecommender import RP3beta from RecSysFramework.Recommender.MatrixFactorization import", "= self.splitter.split(self.dataset) def common_test_recommender(self, recommender_class): temp_save_file_folder = self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder,", "= Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0) self.train, self.test = self.splitter.split(self.dataset) def common_test_recommender(self,", "class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class TopPopRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(TopPop) class GlobalEffectsRecommenderTestCase(RecommenderTestCase): def", "shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, 
GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN", "IALS from RecSysFramework.Recommender.MatrixFactorization import NMF from RecSysFramework.Evaluation.Evaluator import EvaluatorHoldout from RecSysFramework.DataManager.Reader import Movielens1MReader", "self.dataset.get_complete_folder() + os.sep + \"__temp__\" os.makedirs(temp_save_file_folder, exist_ok=True) URM_train = self.train.get_URM() URM_test = self.test.get_URM()", "fit_params = {} recommender_object.fit(**fit_params) evaluator = EvaluatorHoldout([5], exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) recommender_object.save_model(temp_save_file_folder,", "import EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter", "recommender_class = None def setUp(self): self.dataset = Movielens1MReader().load_data() self.splitter = Holdout(train_perc=0.8, test_perc=0.2, validation_perc=0.0)", "exclude_seen=True) metrics_handler = evaluator.evaluateRecommender(recommender_object, URM_test=URM_test) shutil.rmtree(temp_save_file_folder, ignore_errors=True) class RandomRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(Random) class", "= self.train.get_URM() URM_test = self.test.get_URM() recommender_object = recommender_class(URM_train) if isinstance(recommender_object, EarlyStoppingModel): fit_params =", "self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(P3alpha) class RP3betaRecommenderTestCase(RecommenderTestCase):", "from RecSysFramework.DataManager.Splitter import Holdout from RecSysFramework.Utils import 
EarlyStoppingModel class RecommenderTestCase(unittest.TestCase): recommender_class = None", "import os, shutil from RecSysFramework.Recommender.NonPersonalized import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF", "self.common_test_recommender(GlobalEffects) class UserKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(UserKNNCF) class ItemKNNCFRecommenderTestCase(RecommenderTestCase): def test_recommender(self): self.common_test_recommender(ItemKNNCF) class P3alphaRecommenderTestCase(RecommenderTestCase):", "import TopPop, Random, GlobalEffects from RecSysFramework.Recommender.KNN import UserKNNCF from RecSysFramework.Recommender.KNN import ItemKNNCF from" ]
[]
[ "from django import setup setup() from scts.factory.build_app import build_app # noqa app =", "django import setup setup() from scts.factory.build_app import build_app # noqa app = build_app()" ]
[]
[ "<filename>web_wrapper/context_processors.py from django.conf import settings def features(request): return { 'CWR2_AVAILABLE': settings.CWR2_AVAILABLE, 'CWR3_AVAILABLE': settings.CWR3_AVAILABLE,", "from django.conf import settings def features(request): return { 'CWR2_AVAILABLE': settings.CWR2_AVAILABLE, 'CWR3_AVAILABLE': settings.CWR3_AVAILABLE, }" ]
[ "VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers =", "= parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() args.func(args) if __name__ == \"__main__\": main()", "import depth VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION)", "action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() args.func(args) if __name__", "'1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers)", "import list import depth VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version',", "list import depth VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version',", "argparse import list import depth VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser()", "import argparse import list import depth VERSION = '1.0.2' def main(): parser =", "depth VERSION = '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers", "= argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args()", "subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() args.func(args) if __name__ == \"__main__\":", "argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() 
args.func(args)", "parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args =", "main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args", "parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() args.func(args) if", "version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers) args = parser.parse_args() args.func(args) if __name__ ==", "def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers() list.init(subparsers) depth.init(subparsers)", "= '1.0.2' def main(): parser = argparse.ArgumentParser() parser.add_argument('--version', action='version', version=VERSION) subparsers = parser.add_subparsers()" ]
[ "= \"http://www.google.com\" response = requests.get(url) print(f\"your request to {url} came back w/ status", "requests url = \"http://www.google.com\" response = requests.get(url) print(f\"your request to {url} came back", "\"http://www.google.com\" response = requests.get(url) print(f\"your request to {url} came back w/ status code", "import requests url = \"http://www.google.com\" response = requests.get(url) print(f\"your request to {url} came", "= requests.get(url) print(f\"your request to {url} came back w/ status code {response.status_code}\") print(response.text)", "<reponame>norbertosanchezdichi/TIL import requests url = \"http://www.google.com\" response = requests.get(url) print(f\"your request to {url}", "response = requests.get(url) print(f\"your request to {url} came back w/ status code {response.status_code}\")", "url = \"http://www.google.com\" response = requests.get(url) print(f\"your request to {url} came back w/" ]
[ "for it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return", "self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets = [], []", "instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb, obj) else: image, attr, obj", "in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list,", "ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs", "all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs", "= activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split()", "self.triple: image, sub, verb, obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key =", "wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = [] for k in", "\\ else self.test_pairs self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)} if", "sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs(", "obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def", "import torch.utils.data as data from os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab", "'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds,", "= triple # SPO triplet is used in StanfordVRD self.feat = feat feat_file", "self.embeddings = [torch.cat([ 
self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings)", "dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs, self.train_pairs,", "key = (attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets", "self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\", "if '-' in k: ks = k.split('-') emb = torch.stack([embeds[it] for it in", "key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list): with", "get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets = [], [] for instance", "feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim", "in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data =", "for line in open(emb_file, 'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:])))", "ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\", "ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset):", "sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return", "self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, 
self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs", "objects # share the same label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))),", "instance['obj'] key = (sub, verb, obj) else: image, attr, obj = instance['image'], instance['attr'],", "= root # default is `data/compositional-zs` self.phase = phase self.split = split self.emb_file", "os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in", "new_embs = [] for k in vocab: if '-' in k: ks =", "join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs)", "join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in vocab] embeds", "parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs =", "self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r')", "= (sub, verb, obj) else: image, attr, obj = instance['image'], instance['attr'], \\ instance['obj']", "self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and test are not", "'train and test are not mutually exclusive' # load pretrained word2vec embeddings if", "if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file)", "att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds =", "= {pair: idx for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([", "in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ 
self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb,", "else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train' \\ else self.test_pairs", "load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) &", "self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs", "self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file)", "self.split = split self.emb_file = join(root, emb_file) self.triple = triple # SPO triplet", "if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file)", "torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'):", "torch.load(self.root + '/metadata.t7') images, targets = [], [] for instance in data: if", "enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj)", "(sub, verb, obj) else: image, attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key", "= f.read().strip().split('\\n') pairs = [t.split() for t in pairs] pairs = list(map(tuple, pairs))", "objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs,", "activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs, 
self.pairs, self.train_pairs, self.test_pairs =", "[] for instance in data: if self.triple: image, sub, verb, obj = instance['image'],", "are not mutually exclusive' # load pretrained word2vec embeddings if self.emb_file is not", "= instance['image'], instance['attr'], \\ instance['obj'] key = (attr, obj) if key in self.curr_pairs:", "parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs = [t.split() for", "= self.data[index], self.labels[index] feat = self.activations[image] return feat, label def __len__(self): return len(self.data)", "= list(map(tuple, pairs)) if self.triple: subs, verbs, objs = zip(*pairs) return subs, verbs,", "self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings =", "[v.lower() for v in vocab] embeds = {} for line in open(emb_file, 'r'):", "= self.train_pairs if self.phase == 'train' \\ else self.test_pairs self.pair2idx = {pair: idx", "new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split',", "for (attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def", "') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = [] for k", "images, targets = [], [] for instance in data: if self.triple: image, sub,", "pairs = f.read().strip().split('\\n') pairs = [t.split() for t in pairs] pairs = list(map(tuple,", "parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The", "self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file =", "as f: pairs = f.read().strip().split('\\n') pairs = [t.split() for t in pairs] 
pairs", "if self.triple: subs, verbs, objs = zip(*pairs) return subs, verbs, objs, pairs else:", "verbs, objs = zip(*pairs) return subs, verbs, objs, pairs else: attrs, objs =", "\\ 'train and test are not mutually exclusive' # load pretrained word2vec embeddings", "feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features']))", "are now the `verbs` and the subjects and objects # share the same", "vocab): vocab = [v.lower() for v in vocab] embeds = {} for line", "feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) #", "label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs +", "activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load", "pairs else: attrs, objs = zip(*pairs) return attrs, objs, pairs if self.triple: tr_subs,", "in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs]", "sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else: tr_attrs,", "for v in vocab] embeds = {} for line in open(emb_file, 'r'): line", "= embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root,", "instance['obj'] key = (attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images,", "NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train' \\ else self.test_pairs self.pair2idx =", "`verbs` and the subjects and objects # share the 
same label space all_attrs,", "= parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the `verbs` and the", "if self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds =", "self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root", "self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0,", "embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root =", "+ ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs", "self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) ==", "= sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index):", "ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs =", "self.train_pairs if self.phase == 'train' \\ else self.test_pairs self.pair2idx = {pair: idx for", "splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs))", "obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings(", "= torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError", "ts_verbs, ts_objs, ts_pairs = parse_pairs( 
'{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the", "= \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and test", "{pair: idx for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)],", "join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs)", "self.phase == 'train' \\ else self.test_pairs self.pair2idx = {pair: idx for idx, pair", "import torch import torch.utils.data as data from os.path import join, exists def load_word_embeddings(emb_file,", "ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))),", "self.test_pairs self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings", "attrs, objs = zip(*pairs) return attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs,", "obj) else: image, attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key = (attr,", "embeds[line[0]] = wvec new_embs = [] for k in vocab: if '-' in", "else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def", "label = self.data[index], self.labels[index] feat = self.activations[image] return feat, label def __len__(self): return", "= instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb, obj) else: image,", "in open(emb_file, 'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] =", "f.read().strip().split('\\n') pairs = [t.split() for t in pairs] pairs = list(map(tuple, pairs)) if", "test are not mutually exclusive' # load pretrained word2vec embeddings if self.emb_file is", "= split self.emb_file = 
join(root, emb_file) self.triple = triple # SPO triplet is", "tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root,", "# load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs)", "for t in pairs] pairs = list(map(tuple, pairs)) if self.triple: subs, verbs, objs", "self.triple = triple # SPO triplet is used in StanfordVRD self.feat = feat", "not mutually exclusive' # load pretrained word2vec embeddings if self.emb_file is not None:", "vocab: if '-' in k: ks = k.split('-') emb = torch.stack([embeds[it] for it", "ts_pairs def __getitem__(self, index): image, label = self.data[index], self.labels[index] feat = self.activations[image] return", "return subs, verbs, objs, pairs else: attrs, objs = zip(*pairs) return attrs, objs,", "data: if self.triple: image, sub, verb, obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj']", "image, sub, verb, obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub,", "class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root", "triplet is used in StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data", "# default is `data/compositional-zs` self.phase = phase self.split = split self.emb_file = join(root,", "__init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default is", "k.split('-') emb = torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb)", "= torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, 
att_emb_file) obj_emb_file = join(root,", "word2vec embeddings if self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file):", "self.data, self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets", "images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs =", "StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations =", "load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase", "if self.phase == 'train' \\ else self.test_pairs self.pair2idx = {pair: idx for idx,", "def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs", "= parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs", "is not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else:", "vocab = [v.lower() for v in vocab] embeds = {} for line in", "[] for k in vocab: if '-' in k: ks = k.split('-') emb", "self.train_pairs, self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train", "= torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = [] for k in vocab:", "emb = torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds", "= join(root, emb_file) self.triple = triple # SPO triplet is used in StanfordVRD", "self.root = root # default is `data/compositional-zs` self.phase = phase self.split = split", "= zip(*pairs) return attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, 
tr_pairs =", "it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds", "import join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in vocab]", "emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default is `data/compositional-zs` self.phase = phase", "= parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) #", "ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label =", "= [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data,", "self.test_pairs = \\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and", "pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub,", "ks = k.split('-') emb = torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb =", "def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets = [], [] for", "len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and test are not mutually exclusive'", "# SPO triplet is used in StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root,", "+ ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs,", "self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings", "self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] 
self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info()", "def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs = [t.split()", "== 'train' \\ else self.test_pairs self.pair2idx = {pair: idx for idx, pair in", "'{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs", "self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root +", "data = torch.load(self.root + '/metadata.t7') images, targets = [], [] for instance in", "self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels =", "ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now", "attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split))", "phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default is `data/compositional-zs` self.phase", "zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs", "= phase self.split = split self.emb_file = join(root, emb_file) self.triple = triple #", "self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file)", "torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits 
self.attrs,", "list(map(tuple, pairs)) if self.triple: subs, verbs, objs = zip(*pairs) return subs, verbs, objs,", "= wvec new_embs = [] for k in vocab: if '-' in k:", "instance['attr'], \\ instance['obj'] key = (attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key))", "open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs = [t.split() for t in", "idx for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)],", "feat='resnet18'): self.root = root # default is `data/compositional-zs` self.phase = phase self.split =", "raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train' \\ else self.test_pairs self.pair2idx", "is `data/compositional-zs` self.phase = phase self.split = split self.emb_file = join(root, emb_file) self.triple", "verb, obj) else: image, attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key =", "self.emb_file = join(root, emb_file) self.triple = triple # SPO triplet is used in", "verbs, objs, pairs else: attrs, objs = zip(*pairs) return attrs, objs, pairs if", "= parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs +", "SPO triplet is used in StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat)", "\\ instance['obj'] key = (attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return", "for k in vocab: if '-' in k: ks = k.split('-') emb =", "= torch.load(self.root + '/metadata.t7') images, targets = [], [] for instance in data:", "targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f:", "if self.triple: image, sub, verb, obj = instance['image'], instance['sub'], \\ 
instance['pred'], instance['obj'] key", "= torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False,", "wvec new_embs = [] for k in vocab: if '-' in k: ks", "# share the same label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\", "= join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file,", "def __getitem__(self, index): image, label = self.data[index], self.labels[index] feat = self.activations[image] return feat,", "and the subjects and objects # share the same label space all_attrs, all_objs", "all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label = self.data[index], self.labels[index]", "embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase,", "return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root", "parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the `verbs` and the subjects", "subjects and objects # share the same label space all_attrs, all_objs = sorted(list(set(tr_verbs", "obj) in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in", "`data/compositional-zs` self.phase = phase self.split = split self.emb_file = join(root, emb_file) self.triple =", "return attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root,", "targets = [], [] for instance in data: if self.triple: image, sub, verb,", "instance['image'], instance['attr'], \\ 
instance['obj'] key = (attr, obj) if key in self.curr_pairs: images.append(image)", "instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb, obj) else: image, attr,", "(attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self):", "obj = instance['image'], instance['attr'], \\ instance['obj'] key = (attr, obj) if key in", "'{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1)", "# load pretrained word2vec embeddings if self.emb_file is not None: att_emb_file = join(root,", "t in pairs] pairs = list(map(tuple, pairs)) if self.triple: subs, verbs, objs =", "line in open(emb_file, 'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]]", "tr_pairs, ts_pairs def __getitem__(self, index): image, label = self.data[index], self.labels[index] feat = self.activations[image]", "else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs(", "phase self.split = split self.emb_file = join(root, emb_file) self.triple = triple # SPO", "in data: if self.triple: image, sub, verb, obj = instance['image'], instance['sub'], \\ instance['pred'],", "& set(self.test_pairs)) == 0, \\ 'train and test are not mutually exclusive' #", "= sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else:", "self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train'", "join(root, emb_file) self.triple = triple # SPO triplet is used in StanfordVRD self.feat", "from os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for 
v", "used in StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file)", "sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs,", "= line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = []", "torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images,", "now the `verbs` and the subjects and objects # share the same label", "\\ instance['pred'], instance['obj'] key = (sub, verb, obj) else: image, attr, obj =", "self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs =", "= (attr, obj) if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def", "zip(*pairs) return subs, verbs, objs, pairs else: attrs, objs = zip(*pairs) return attrs,", "tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split))", "all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs +", "'-' in k: ks = k.split('-') emb = torch.stack([embeds[it] for it in ks]).mean(dim=0)", "embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt',", "self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs,", "in vocab: if '-' in k: ks = k.split('-') emb = torch.stack([embeds[it] for", "in k: ks = k.split('-') emb = torch.stack([embeds[it] for it in 
ks]).mean(dim=0) else:", "same label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs", "all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self,", "+ ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root,", "parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs)))", "root # default is `data/compositional-zs` self.phase = phase self.split = split self.emb_file =", "all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs +", "= feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'],", "in vocab] embeds = {} for line in open(emb_file, 'r'): line = line.strip().split('", "else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings =", "[torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels", "objs, pairs else: attrs, objs = zip(*pairs) return attrs, objs, pairs if self.triple:", "emb_file) self.triple = triple # SPO triplet is used in StanfordVRD self.feat =", "tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs(", "exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else:", "v in vocab] embeds = {} for line in open(emb_file, 'r'): line =", "+ 
'/metadata.t7') images, targets = [], [] for instance in data: if self.triple:", "pairs = [t.split() for t in pairs] pairs = list(map(tuple, pairs)) if self.triple:", "and test are not mutually exclusive' # load pretrained word2vec embeddings if self.emb_file", "torch import torch.utils.data as data from os.path import join, exists def load_word_embeddings(emb_file, vocab):", "k in vocab: if '-' in k: ks = k.split('-') emb = torch.stack([embeds[it]", "objs = zip(*pairs) return attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs", "The attributes are now the `verbs` and the subjects and objects # share", "space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs", "= load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if", "'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds,", "obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train' \\ else", "{} for line in open(emb_file, 'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float,", "= join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file,", "ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the `verbs` and", "[t.split() for t in pairs] pairs = list(map(tuple, pairs)) if self.triple: subs, verbs,", "+ ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs,", "image, label = self.data[index], self.labels[index] feat = self.activations[image] return feat, label def __len__(self):", "for (sub, verb, obj) in self.curr_pairs] else: 
self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for", "f: pairs = f.read().strip().split('\\n') pairs = [t.split() for t in pairs] pairs =", "pairs] pairs = list(map(tuple, pairs)) if self.triple: subs, verbs, objs = zip(*pairs) return", "all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label = self.data[index], self.labels[index] feat =", "k: ks = k.split('-') emb = torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb", "instance in data: if self.triple: image, sub, verb, obj = instance['image'], instance['sub'], \\", "tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root,", "= '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim =", "the `verbs` and the subjects and objects # share the same label space", "root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default is `data/compositional-zs`", "self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations = dict(", "pairs = list(map(tuple, pairs)) if self.triple: subs, verbs, objs = zip(*pairs) return subs,", "subs, verbs, objs = zip(*pairs) return subs, verbs, objs, pairs else: attrs, objs", "if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs,", "exclusive' # load pretrained word2vec embeddings if self.emb_file is not None: att_emb_file =", "<filename>compositional-zs/compose_data.py import torch import torch.utils.data as data from os.path import join, exists def", "= load_word_embeddings( self.emb_file, self.attrs) 
torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds", "attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key = (attr, obj) if key", "share the same label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs", "in pairs] pairs = list(map(tuple, pairs)) if self.triple: subs, verbs, objs = zip(*pairs)", "line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = [] for", "'/metadata.t7') images, targets = [], [] for instance in data: if self.triple: image,", "tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs,", "subs, verbs, objs, pairs else: attrs, objs = zip(*pairs) return attrs, objs, pairs", "'{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the `verbs` and the subjects and", "with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs = [t.split() for t", "tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs,", "+ ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs", "verb, obj) in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr, obj)", "in StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data = torch.load(feat_file) self.activations", "else self.test_pairs self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)} if self.triple:", "self.phase = phase self.split = split self.emb_file = join(root, emb_file) self.triple = triple", "ts_objs))) all_pairs = 
sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def", "'{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, all_objs = sorted(list(set(tr_attrs", "'r') as f: pairs = f.read().strip().split('\\n') pairs = [t.split() for t in pairs]", "key = (sub, verb, obj) else: image, attr, obj = instance['image'], instance['attr'], \\", "sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image,", "split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default is `data/compositional-zs` self.phase =", "all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs)))", "(sub, verb, obj) in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]]) for (attr,", "= k.split('-') emb = torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb = embeds[k]", "verb, obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb, obj)", "def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root # default", "self.triple: subs, verbs, objs = zip(*pairs) return subs, verbs, objs, pairs else: attrs,", "zip(*pairs) return attrs, objs, pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs(", "self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are", "self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise", "else: attrs, objs = zip(*pairs) return 
attrs, objs, pairs if self.triple: tr_subs, tr_verbs,", "the same label space all_attrs, all_objs = sorted(list(set(tr_verbs + ts_verbs))), \\ sorted(list(set(tr_objs +", "def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in vocab] embeds = {}", "self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)],", "instance['pred'], instance['obj'] key = (sub, verb, obj) else: image, attr, obj = instance['image'],", "embeds = {} for line in open(emb_file, 'r'): line = line.strip().split(' ') wvec", "= [v.lower() for v in vocab] embeds = {} for line in open(emb_file,", "return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label = self.data[index],", "'train' \\ else self.test_pairs self.pair2idx = {pair: idx for idx, pair in enumerate(self.curr_pairs)}", "= torch.load(feat_file) self.activations = dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits", "= {} for line in open(emb_file, 'r'): line = line.strip().split(' ') wvec =", "+ ts_pairs))) return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label", "= [t.split() for t in pairs] pairs = list(map(tuple, pairs)) if self.triple: subs,", "self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else: self.embeddings = [torch.cat([ self.attrs_embds[self.attrs.index(attr)], self.objs_embds[self.objs.index(obj)]])", "and objects # share the same label space all_attrs, all_objs = sorted(list(set(tr_verbs +", "set(self.test_pairs)) == 0, \\ 'train and test are not mutually exclusive' # load", "load pretrained word2vec embeddings if self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7')", "else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) 
torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs =", "self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else: self.embeddings = [torch.cat([", "pairs if self.triple: tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs,", "obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb, obj) else:", "self.curr_pairs = self.train_pairs if self.phase == 'train' \\ else self.test_pairs self.pair2idx = {pair:", "= sorted(list(set(tr_attrs + ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs)))", "triple=False, feat='resnet18'): self.root = root # default is `data/compositional-zs` self.phase = phase self.split", "default is `data/compositional-zs` self.phase = phase self.split = split self.emb_file = join(root, emb_file)", "data from os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for", "idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for", "line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs =", "\\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs =", "self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file):", "parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n') pairs =", "for idx, pair in enumerate(self.curr_pairs)} if self.triple: self.embeddings = [torch.cat([ 
self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]])", "= self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets = [],", "self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else:", "objs = zip(*pairs) return subs, verbs, objs, pairs else: attrs, objs = zip(*pairs)", "emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class CompositionDataset(data.Dataset): def __init__(self,", "CompositionDataset(data.Dataset): def __init__(self, root, phase, split='compositional-split', emb_file='glove/glove.6B.300d.txt', triple=False, feat='resnet18'): self.root = root #", "if self.triple: self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in", "__getitem__(self, index): image, label = self.data[index], self.labels[index] feat = self.activations[image] return feat, label", "pretrained word2vec embeddings if self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7') if", "ts_attrs))), \\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs,", "None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds =", "self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\", "triple # SPO triplet is used in StanfordVRD self.feat = feat feat_file =", "for instance in data: if self.triple: image, sub, verb, obj = instance['image'], instance['sub'],", "self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) all_attrs, 
all_objs = sorted(list(set(tr_attrs +", "(attr, obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self):", "if key in self.curr_pairs: images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list):", "torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7')", "targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs = f.read().strip().split('\\n')", "exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in vocab] embeds =", "exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file", "[], [] for instance in data: if self.triple: image, sub, verb, obj =", "tr_subs, tr_verbs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs =", "load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds =", "tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split))", "sub, verb, obj = instance['image'], instance['sub'], \\ instance['pred'], instance['obj'] key = (sub, verb,", "self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase ==", "obj) in self.curr_pairs] self.embeddings = torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data", 
"torch.utils.data as data from os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab =", "ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes are now the `verbs`", "return images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as f: pairs", "ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split))", "images.append(image) targets.append(self.curr_pairs.index(key)) return images, targets def parse_split(self): def parse_pairs(pair_list): with open(pair_list, 'r') as", "all_objs, all_pairs, tr_pairs, ts_pairs def __getitem__(self, index): image, label = self.data[index], self.labels[index] feat", "\\ sorted(list(set(tr_objs + ts_objs))) all_pairs = sorted(list(set(tr_pairs + ts_pairs))) return all_attrs, all_objs, all_pairs,", "torch.load(obj_emb_file) else: self.objs_embds = load_word_embeddings( self.emb_file, self.objs) torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs", "'{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_subs, ts_verbs, ts_objs, ts_pairs = parse_pairs( '{}/{}/test_pairs.txt'.format(self.root, self.split)) # The attributes", "self.embeddings = [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else:", "torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if exists(obj_emb_file): self.objs_embds = torch.load(obj_emb_file) else: self.objs_embds", "0, \\ 'train and test are not mutually exclusive' # load pretrained word2vec", "ts_verbs))), \\ sorted(list(set(tr_objs + ts_objs + tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs", "self.split)) # The attributes are now the `verbs` and the subjects and objects", "self.triple: self.embeddings = [torch.cat([ 
self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs]", "as data from os.path import join, exists def load_word_embeddings(emb_file, vocab): vocab = [v.lower()", "pairs)) if self.triple: subs, verbs, objs = zip(*pairs) return subs, verbs, objs, pairs", "the subjects and objects # share the same label space all_attrs, all_objs =", "# The attributes are now the `verbs` and the subjects and objects #", "open(emb_file, 'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec", "in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs) return embeds class", "not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds", "[torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else: self.embeddings =", "split self.emb_file = join(root, emb_file) self.triple = triple # SPO triplet is used", "assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and test are not mutually", "= dict( zip(activation_data['files'], activation_data['features'])) self.feat_dim = activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs,", "= torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds =", "att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds = torch.load(att_emb_file) else: self.attrs_embds = load_word_embeddings(", "mutually exclusive' # load pretrained word2vec embeddings if self.emb_file is not None: att_emb_file", "torch.save(self.objs_embds, obj_emb_file) else: raise NotImplementedError self.curr_pairs = self.train_pairs if self.phase == 'train' \\", "index): 
image, label = self.data[index], self.labels[index] feat = self.activations[image] return feat, label def", "image, attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key = (attr, obj) if", "= torch.stack(self.embeddings) self.data, self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7')", "else: self.attrs_embds = load_word_embeddings( self.emb_file, self.attrs) torch.save(self.attrs_embds, att_emb_file) obj_emb_file = join(root, 'objs_embs.t7') if", "vocab] embeds = {} for line in open(emb_file, 'r'): line = line.strip().split(' ')", "else: image, attr, obj = instance['image'], instance['attr'], \\ instance['obj'] key = (attr, obj)", "line[1:]))) embeds[line[0]] = wvec new_embs = [] for k in vocab: if '-'", "torch.stack([embeds[it] for it in ks]).mean(dim=0) else: emb = embeds[k] new_embs.append(emb) embeds = torch.stack(new_embs)", "+ tr_subs + ts_subs))) else: tr_attrs, tr_objs, tr_pairs = parse_pairs( '{}/{}/train_pairs.txt'.format(self.root, self.split)) ts_attrs,", "embeddings if self.emb_file is not None: att_emb_file = join(root, 'attrs_embs.t7') if exists(att_emb_file): self.attrs_embds", "attributes are now the `verbs` and the subjects and objects # share the", "\\ self.parse_split() assert len(set(self.train_pairs) & set(self.test_pairs)) == 0, \\ 'train and test are", "= [torch.cat([ self.objs_embds[self.objs.index(sub)], self.attrs_embds[self.attrs.index(verb)], self.objs_embds[self.objs.index(obj)]]) for (sub, verb, obj) in self.curr_pairs] else: self.embeddings", "load_word_embeddings(emb_file, vocab): vocab = [v.lower() for v in vocab] embeds = {} for", "== 0, \\ 'train and test are not mutually exclusive' # load pretrained", "= zip(*pairs) return subs, verbs, objs, pairs else: attrs, objs = zip(*pairs) return", "= [] for k in vocab: if '-' in k: ks = k.split('-')", "torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs = [] for k in vocab: if", "is used in 
StanfordVRD self.feat = feat feat_file = '{}/{}_features.t7'.format(root, feat) activation_data =", "activation_data['features'].size(1) # load splits self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = \\ self.parse_split() assert", "'r'): line = line.strip().split(' ') wvec = torch.FloatTensor(list(map(float, line[1:]))) embeds[line[0]] = wvec new_embs", "= [], [] for instance in data: if self.triple: image, sub, verb, obj", "self.labels = self.get_split_info() def get_split_info(self): data = torch.load(self.root + '/metadata.t7') images, targets =" ]
[ "skip self.version = \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck", "== [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks: if c.name.startswith(\"Contributors\"): return True", ") self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid):", "r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0", "None if dups == 0: return 1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck,", "None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType =", "descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1,", "r2.outcome[idx] is not None if dups == 0: return 1 else: return dupsWithInfo/dups", "benchmark is inspired by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def", "\"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck]))", "the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best", "[datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED", "def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1", "\\ DatesTypeCheck, \\ DescriptionsTypeCheck, 
\\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\", "DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return", "dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups == 0:", "= SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True,", "2.0 License # # This file contains code related to the DataCite best", "\\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\", "enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation(", ") ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0", "inst in enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else:", "FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck", "in contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation( [rightsAreOpenCheck, contributorsTypeCheck], rightsHolderIfRightsClosed )", "= IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = 
CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck", "ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] )", "def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for", "len(r2.outcome)): return 0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation = {} for", "and r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not", "def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal", "# MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen:", "breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\", "subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck]))", "type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors are optional if len(rdp.metadata.contributors) == 0:", ") self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck", "types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c 
in e.checks: if type(c).__name__ ==", "checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx]", "\"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES", "FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck,", ") # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) )", "= VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck =", "== 0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class", "related to the DataCite best practice guide benchmark # # Basis for this", "= TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck],", "self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck", "contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], 
allow_person_related_tests_to_be_skipped )", "[descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation(", "\"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] )", "if dateTypesHasInformation.get(t) is not None: dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx] is", "else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution", "dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck()", "ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck()", ") # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck =", "ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck,", "TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck =", "contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx] ==", "evaluation = 0 
isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for", "best practice guide benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################", "self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\",", ") self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) )", "contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution =", "def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome),", "== \"LanguageSpecifiedCheck\": return True # Version is optional if rdp.metadata.version is None: for", "def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version = \"0.0.1\"", "in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups += 1 if dateTypesHasInformation[t] and", "\\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\", "\"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE", "allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\",", 
"\"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\"", "TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck()", ") ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck", "are optional if len(rdp.metadata.contributors) == 0: # if the license is non-open, we", "1 elif \"RightsHolder\" in contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation( [rightsAreOpenCheck,", "allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def", "len(rdp.metadata.contributors) == 0: # if the license is non-open, we need to check", "for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in e.checks: if", "if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo = 0", "= LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck]))", "= checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo", "= ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def 
allow_person_related_tests_to_be_skipped(checks, pid): evaluation =", "LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck =", "\\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\", "True # Related Resources are optional if len(rdp.metadata.relatedResources) == 0: for c in", "\"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\",", "1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck()", "contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution", "IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck,", "sense for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in e.checks:", "[\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation(", "FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( 
FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck", "SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck", "inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck,", "\"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck", "\"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) #", "bool: # Evaluating language make no sense for these types if rdp.metadata.type in", "self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck]))", "= checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\":", ") ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1)", "DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck,", "in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is optional if", "CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) 
) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) )", "2020 # # Apache 2.0 License # # This file contains code related", "= \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) #", "rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck", "[publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 =", "InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp", "self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation", "\\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\", "dupsWithInfo = 0 dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t)", "practice guide benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import", "from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation,", "practice guide (DOI: 
10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip", "FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation", "Practice Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id = \"BPG\" # PID", "DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck,", "r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None", "dups = 0 dupsWithInfo = 0 dateTypesHasInformation = {} for idx, t in", "optional if len(rdp.metadata.relatedResources) == 0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True", "= DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid):", "-> bool: # Evaluating language make no sense for these types if rdp.metadata.type", "CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck,", "import sys from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata", "SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck,", "if len(rdp.metadata.contributors) == 0: # if the license is non-open, we need to", "IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) 
self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck =", "\\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\", "\\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid):", "{} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups +=", "DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck,", "== 0: return 1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation )", "self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck,", "Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation,", "\\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\", "else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck", "TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE 
titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck()", ") self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck =", "is optional if rdp.metadata.version is None: for c in e.checks: if type(c).__name__ ==", "Rdp def skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating language make no", "contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult", "return True # Related Resources are optional if len(rdp.metadata.relatedResources) == 0: for c", "1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE", "[titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT", "\\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\", "SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation(", "+= 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups == 0: return", "is not None if dups == 0: return 1 else: return dupsWithInfo/dups self.add_evaluation(", "self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid)", "[ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", 
\"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\",", "sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck", "SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1", "\\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\", "evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in", "\\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\", "################################################################################ import sys from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from", "from rdp import Rdp def skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating", "DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\",", "not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups", "] ) ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck],", "10.5281/zenodo.3559800 # 
################################################################################ import sys from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck,", "(DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip", "SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1,", "ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome", "if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation", "Rightsholder! if [c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for", "rdp.metadata.version is None: for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True", "= SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) )", "\"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck", "= 0 dupsWithInfo = 0 dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome):", "DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice", ") self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", 
\"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\",", "publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if", "IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp", "def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued,", "e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks: if c.name.startswith(\"Contributors\"): return", "\"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ]", "self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck]))", "self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck", "is None: for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True #", "= {} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups", "FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) #", 
"\"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\",", "ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck,", "return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation(", "# CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck =", "self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation(", "datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks,", "guide benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys", "for c in e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources are optional", "sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck],", "Evaluating language make no sense for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"):", 
"return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation(", "1 if dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t] =", ") self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck()", "SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\", "True # Version is optional if rdp.metadata.version is None: for c in e.checks:", "ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation(", "\\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\", "# Related Resources are optional if len(rdp.metadata.relatedResources) == 0: for c in e.checks:", "if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired", "continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck", "# Apache 2.0 License # # This file contains code related to the", "+= 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) )", 
"This benchmark is inspired by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\"", "ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck])", "formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) #", "[\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks: if c.name.startswith(\"Contributors\"): return True #", "if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ],", "[relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\",", "creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation(", "\"PhysicalObject\"): for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version", "for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors are", "self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck()", "not None: dups += 1 if 
dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo", "# FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck()", "rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\" in", "checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution): if", "pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return", "ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation,", "import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\", ") # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck", "CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck,", "self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck]))", "import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck,", "Resources are 
optional if len(rdp.metadata.relatedResources) == 0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"):", "\"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck =", "SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck,", "contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def", "Version is optional if rdp.metadata.version is None: for c in e.checks: if type(c).__name__", "1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation(", "contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped", "newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]:", "self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 )", "\"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [", "contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\",", 
"self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\",", "= RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck =", "= SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) )", "RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck,", "RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck,", "these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in e.checks: if type(c).__name__", ") ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck()", "allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes", "= 0 dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is", "in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark", "publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( 
[publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, )", "\\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\", "1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation(", "DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck,", "SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck,", "0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark):", "\"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation(", ") self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\",", "(len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation = {}", "== \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation(", "300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"]", "__init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id", "in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation =", "license is non-open, we need to check 
Rightsholder! if [c.name for c in", ") ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome", "CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck", "# Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks import", "checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType: return", "ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation(", "RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\",", "duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)):", "datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome", "RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() 
self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck])", "FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation =", "allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal =", "\"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\",", "DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck()", "versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck", "SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) #", "titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 )", "optional if rdp.metadata.version is None: for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\":", "for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\"", ") 
self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] )", "# Copyright: <NAME> 2020 # # Apache 2.0 License # # This file", "TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation(", "0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if", "FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck]))", "MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return", "\\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\", "# ################################################################################ import sys from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck", "isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation +=", "from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck,", "0 dupsWithInfo = 0 dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome): if", "\\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ 
RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\", "# RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck],", "+= 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) )", "Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks import Benchmark", "[descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) )", "c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\" This", "by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self,", "[sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) )", "(\"Image\", \"PhysicalObject\"): for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True #", "SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) )", "check Rightsholder! 
if [c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False", "creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation(", "DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck,", ") def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome", "this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks import Benchmark from breadp.checks.pid", "self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck =", "CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) )", "# SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation(", "DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid):", "e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources are optional if 
len(rdp.metadata.relatedResources) ==", "self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation(", "is not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if", "return True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the", "self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck", "1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups == 0: return 1", "rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) )", "\"TableOfContents\", \"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome", "\"ContributorsTypeCheck\"]: return False for c in e.checks: if c.name.startswith(\"Contributors\"): return True # Related", "None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups ==", "\"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version =", "name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id =", "rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() 
self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck", "DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300", "in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks: if c.name.startswith(\"Contributors\"):", "to check Rightsholder! if [c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return", "return True # Contributors are optional if len(rdp.metadata.contributors) == 0: # if the", "= isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation", "\\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def", "LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation(", "evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped )", "TrueEvaluation from rdp import Rdp def skip(e: Evaluation, rdp: Rdp) -> bool: #", "IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) )", "1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ],", 
"dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx] is", "isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in", "= 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx,", "for idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation +=", "import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\", "not None if dups == 0: return 1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation(", "\\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from", "RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\",", "IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation(", "for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is", "\\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\", "self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = 
DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck", "optional if len(rdp.metadata.contributors) == 0: # if the license is non-open, we need", "rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return", "[contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], )", "RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck,", "idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups += 1 if", "datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid)", "SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations", "the license is non-open, we need to check Rightsholder! 
if [c.name for c", "# # Apache 2.0 License # # This file contains code related to", "DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\", "], ) ) # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck =", "TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\", "r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0", ") ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\",", "\"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) )", "True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the DataCite", "guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip =", "VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck()", "self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck =", "DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, 
\\ IsIdenticalToEvaluation,", "VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\", "# Contributors are optional if len(rdp.metadata.contributors) == 0: # if the license is", "DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck =", "= 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution):", "None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck", "0: # if the license is non-open, we need to check Rightsholder! if", "# VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck =", "\\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation,", "if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution)", "# DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck =", "TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck()", "ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, 
pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome", ") ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes =", "DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max )", "DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation(", "IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck =", "== \"VersionSpecifiedCheck\": return True # Contributors are optional if len(rdp.metadata.contributors) == 0: #", "\\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import", "0 dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not", "= RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\",", ") # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck", "best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, 
\"Best Practice Benchmark\")", ") self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck", "self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\"", "rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType: return 1 else: return 0 self.add_evaluation(", "TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def skip(e: Evaluation, rdp:", "\\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def skip(e: Evaluation, rdp: Rdp)", "to the DataCite best practice guide benchmark # # Basis for this benchmark:", "self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) )", "subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation(", "DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck,", "evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck,", "no sense for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in", "need to check Rightsholder! 
if [c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]:", "= checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups", "DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck],", "RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck()", "\\ TrueEvaluation from rdp import Rdp def skip(e: Evaluation, rdp: Rdp) -> bool:", "CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck,", "# PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck", "creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) #", "RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck,", ") ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck()", "in 
enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation(", "RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS", ") ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"]", "PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck,", "contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType: return 1", "allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx,", "\\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import", "contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation", "1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION", "self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck 
= TitlesJustAFileNameCheck() titlesTypeCheck =", "Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id = \"BPG\"", ") def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck],", "[c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in", "we need to check Rightsholder! if [c.name for c in e.checks] == [\"RightsAreOpenCheck\",", "= DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation(", "idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution)", "self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome", "if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [", "for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups += 1", "in enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation", "[\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) )", "descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) )", 
"\"VersionSpecifiedCheck\": return True # Contributors are optional if len(rdp.metadata.contributors) == 0: # if", "1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck =", "benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from", "SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR", "contains code related to the DataCite best practice guide benchmark # # Basis", "class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the DataCite best practice guide", "# CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck])", "= ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks,", "= ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution =", "self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] )", "\"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES 
datesTypeCheck = DatesTypeCheck() publicationYearCheck =", "self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) )", "1, 300 ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck],", "type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by", "evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck],", "IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def skip(e:", "= checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst: if", "relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\",", "language make no sense for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for", "################################################################################ # Copyright: <NAME> 2020 # # Apache 2.0 License # # This", "self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation(", "0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation = {} for idx, t", "dups == 0: return 1 else: return 
dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation", "RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\",", "\"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\",", "from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\", "descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck],", "[\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) #", "\"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\",", "\\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation,", "if dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t] = r2.outcome[idx]", "rdp import Rdp def skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating language", "Apache 2.0 License # # This file contains code related to the DataCite", "pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = 
checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\"", "from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\", "TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\", "True # Contributors are optional if len(rdp.metadata.contributors) == 0: # if the license", "ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0", ") ) # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck()", "\"RightsHolder\" in contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation( [rightsAreOpenCheck, contributorsTypeCheck], rightsHolderIfRightsClosed", "RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck()", ") # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck]))", "\\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\", "self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck =", "if 
[c.name for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c", "if len(rdp.metadata.relatedResources) == 0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return", "DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ ==", "# # This file contains code related to the DataCite best practice guide", "dateTypesHasInformation[t] = r2.outcome[idx] is not None if dups == 0: return 1 else:", "= skip self.version = \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck()", "+= 1 if dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo += 1 dateTypesHasInformation[t]", "= ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult =", "languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck()", "\"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck =", "if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is optional if rdp.metadata.version is", "This file contains code related to the DataCite best practice guide benchmark #", "ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation,", ") # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck", "\\ SubjectsNumberCheck, \\ 
SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\", ") # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if", "= DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck", "if the license is non-open, we need to check Rightsholder! if [c.name for", "\\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def skip(e: Evaluation,", "c in e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources are optional if", "ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck() def allow_person_related_tests_to_be_skipped(checks, pid):", "[ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\",", "[\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen =", "isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck()", "for idx, inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal", "enumerate(r1.outcome): if dateTypesHasInformation.get(t) is 
not None: dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx]", "= FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) # VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS", "checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo =", "for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks import Benchmark from", ") ) self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] )", "the DataCite best practice guide benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800", "= DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck],", "pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False)", "file contains code related to the DataCite best practice guide benchmark # #", "dateTypesHasInformation = {} for idx, t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None:", "\"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck", "titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck],", "checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif 
\"RightsHolder\" in contributorsType: return 1 else: return", "breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck,", "PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck =", "\\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\", "= checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType:", "# RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation(", "TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1", "return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the DataCite best", "= SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) #", "return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck =", 
"ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) #", "is non-open, we need to check Rightsholder! if [c.name for c in e.checks]", "\\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\", "TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation(", "e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors are optional if len(rdp.metadata.contributors)", "1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck =", "[\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\",", "enumerate(isInstitution): if inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation +=", "TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"]", "inst: if contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return", "1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation(", "c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True # 
Contributors are optional", "in (\"Image\", \"PhysicalObject\"): for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True", ") ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED", "\\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\", "contributorTypes[idx] == \"HostingInstitution\": evaluation += 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation(", "self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\",", "SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max) ) self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck]))", "License # # This file contains code related to the DataCite best practice", "TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from rdp import Rdp def skip(e: Evaluation, rdp: Rdp) ->", "import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation,", "== checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1", "\\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, 
\\", "], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) )", "rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType = checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif", "t in enumerate(r1.outcome): if dateTypesHasInformation.get(t) is not None: dups += 1 if dateTypesHasInformation[t]", "contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\",", "booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst:", "if dups == 0: return 1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck],", "\"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck", "\\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck, \\ TitlesJustAFileNameCheck, \\", "sys from breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import", "\"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck()", "VERSION versionSpecifiedCheck = VersionSpecifiedCheck() self.add_evaluation(TrueEvaluation([versionSpecifiedCheck])) # RIGHTS rightsHasAtLeastOneLicenseCheck = RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck()", "= CreatorsOrcidCheck() 
creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck])", "checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks,", "TitlesJustAFileNameCheck, \\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\", "\\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\", ") def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in", "Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck", "<filename>breadp/benchmarks/example.py ################################################################################ # Copyright: <NAME> 2020 # # Apache 2.0 License # #", "<NAME> 2020 # # Apache 2.0 License # # This file contains code", "def skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating language make no sense", "= SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck],", "is not None: dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx] is not None:", "sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( 
[sizesByteSizeCheck],", "\"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen = checks[0].get_last_result(pid).outcome contributorsType", "ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck,", "False class BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the DataCite best practice", "if rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType: return 1 else: return 0", "import Rdp def skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating language make", "Contributors are optional if len(rdp.metadata.contributors) == 0: # if the license is non-open,", "dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo += 1", "self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks,", "ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation,", "non-open, we need to check Rightsholder! 
if [c.name for c in e.checks] ==", "self.add_evaluation( ContainsAllEvaluation( [titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck =", "True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([formatsAreValidMediaTypeCheck]) ) #", "rdp: Rdp) -> bool: # Evaluating language make no sense for these types", "publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"])", "0 in (len(r1.outcome), len(r2.outcome)): return 0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation", "type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is optional if rdp.metadata.version is None:", "titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) ) self.add_evaluation( ContainsAllEvaluation(", "evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [", "\\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\", "Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck,", "= CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( 
TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE", "isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst:", ") ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck =", "RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\ SubjectsHaveWikidataKeywordsCheck,", "\\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck, \\", "\\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\ TrueEvaluation from", "\"\"\" This benchmark is inspired by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799)", "= CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck])", "self.skip = skip self.version = \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck =", "\\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation, \\", "\\ DescriptionsLengthCheck, \\ DescriptionsNumberCheck, \\ FormatsAreValidMediaTypeCheck, \\ LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\", ") self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) 
self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck", "dateTypesHasInformation.get(t) is not None: dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx] is not", "\\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\ FunctionEvaluation, \\", "# # Basis for this benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks", "return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def", "FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\",", "make no sense for these types if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c", "= RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) # DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck =", "ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck,", ") # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck", "DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\ 
DescriptionsNumberCheck,", ") self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck = FormatsAreValidMediaTypeCheck()", "self.version = \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck =", "benchmark: 10.5281/zenodo.3559800 # ################################################################################ import sys from breadp.benchmarks import Benchmark from breadp.checks.pid import", "c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks: if", "= TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None,", "checks[0].get_last_result(pid) r2 = checks[1].get_last_result(pid) if 0 in (len(r1.outcome), len(r2.outcome)): return 0 dups =", "[descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1, 300 ) ) self.add_evaluation(", "FalseEvaluation, \\ FunctionEvaluation, \\ InListEvaluation, \\ IsBetweenEvaluation, \\ IsIdenticalToEvaluation, \\ TheMoreTrueTheBetterEvaluation, \\ TheMoreFalseTheBetterEvaluation,", "DATES datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck()", "IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT formatsAreValidMediaTypeCheck", "descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation(", "return 1 else: return dupsWithInfo/dups 
self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) ) #", "\\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\ DescriptionsLanguageCheck, \\ DescriptionsLengthCheck, \\", "\\ RightsHasAtLeastOneLicenseCheck, \\ SizesNumberCheck, \\ SizesByteSizeCheck, \\ SubjectsNumberCheck, \\ SubjectsHaveDdcCheck, \\ SubjectsAreQualifiedCheck, \\", "\"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck = DatesTypeCheck()", "= TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [titlesTypeCheck], None, 1 ) )", "\"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\",", "# LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck", "], allow_type_to_enforce_institution ) ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\",", "InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\",", "[ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped ) ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ],", "\"Collected\"]) ) def 
publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck,", "# Evaluating language make no sense for these types if rdp.metadata.type in (\"Image\",", "return 1 elif \"RightsHolder\" in contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation(", "idx, inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return", "return 0 dups = 0 dupsWithInfo = 0 dateTypesHasInformation = {} for idx,", "\\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation,", "# if the license is non-open, we need to check Rightsholder! if [c.name", "return True # Version is optional if rdp.metadata.version is None: for c in", "FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 = checks[0].get_last_result(pid) r2", "duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck", "InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ],", "datesTypeCheck = DatesTypeCheck() publicationYearCheck = PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation(", "c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is optional", "checks[1].get_last_result(pid).outcome for idx, inst in enumerate(isInstitution): if inst: if contributorTypes[idx] == 
\"HostingInstitution\": evaluation", "= DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation( IsBetweenEvaluation( [descriptionsLengthCheck], 1,", "if rdp.metadata.version is None: for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return", "if rdp.metadata.type in (\"Image\", \"PhysicalObject\"): for c in e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\":", "CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck() creatorsContainInstitutionsCheck = CreatorsContainInstitutionsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) )", "0 isInstitution = checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst", "inspired by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None):", "breadp.checks.metadata import \\ CreatorsOrcidCheck, \\ CreatorsFamilyAndGivenNameCheck, \\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ ContributorsFamilyAndGivenNameCheck, \\", "\"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) #", "relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\", \"IsPartOf\", \"HasMetadata\",", "[\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck =", ") ) self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, 
contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks,", "are optional if len(rdp.metadata.relatedResources) == 0: for c in e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return", "TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck()", "booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsOrcidCheck ], allow_person_related_tests_to_be_skipped", "\\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation, \\ ContainsItemExactlyNTimesEvaluation, \\ DoesNotContainEvaluation, \\ Evaluation, \\ FalseEvaluation, \\", "# Version is optional if rdp.metadata.version is None: for c in e.checks: if", "code related to the DataCite best practice guide benchmark # # Basis for", "\\ TitlesLanguageCheck, \\ TitlesTypeCheck, \\ VersionSpecifiedCheck from breadp.evaluations import \\ ContainsAllEvaluation, \\ ContainsAtLeastOneEvaluation,", "return False for c in e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources", "\\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\ DatesIssuedYearCheck, \\ DatesTypeCheck, \\ DescriptionsTypeCheck, \\", "datesInformationCheck], duplicatesHaveInformation ) ) # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES", "\"IsPartOf\", \"HasMetadata\", \"IsMetadataFor\", \"HasVersion\", \"IsVersionOf\", \"IsNewVersionOf\", \"IsPreviousVersionOf\", \"IsSourceOf\", \"IsDerivedFrom\", \"References\", \"IsReferencedBy\", \"IsVariantFormOf\", \"IsIdenticalTo\",", "in 
e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors are optional if", "c.name.startswith(\"Contributors\"): return True # Related Resources are optional if len(rdp.metadata.relatedResources) == 0: for", "= checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst: continue", "CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck() contributorsTypeCheck = ContributorsTypeCheck()", "# DESCRIPTIONS descriptionsNumberCheck = DescriptionsNumberCheck() descriptionsLengthCheck = DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck =", "= checks[0].get_last_result(pid).outcome booleanCheckResult = checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution):", "BPGBenchmark(Benchmark): \"\"\" This benchmark is inspired by the DataCite best practice guide (DOI:", "checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) ) def duplicatesHaveInformation(checks, pid): r1 =", "= PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) )", "+= 1/len(isInstitution) else: evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck", "= DescriptionsLengthCheck() descriptionsLanguageCheck = DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max", 
"LanguageSpecifiedCheck, \\ PublicationYearCheck, \\ RelatedResourceMetadataCheck, \\ RelatedResourceTypeCheck, \\ RightsAreOpenCheck, \\ RightsHaveValidSPDXIdentifierCheck, \\ RightsHasAtLeastOneLicenseCheck,", "[descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED def rightsHolderIfRightsClosed(checks, pid): rightsAreOpen", "pid): return checks[0].get_last_result(pid).outcome \\ == checks[1].get_last_result(pid).outcome self.add_evaluation( FunctionEvaluation( [publicationYearCheck, datesIssuedYearCheck], publishedEqualsIssued, ) )", "self.add_evaluation( ContainsAllEvaluation( [descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation(", "[titlesLanguageCheck], [\"en\"] ) ) # SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck", "contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution", "# TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck() self.add_evaluation(FalseEvaluation([titlesJustAFileNameCheck])) self.add_evaluation(", "evaluation += 1/len(isInstitution) return evaluation self.add_evaluation( FunctionEvaluation( [ contributorsContainInstitutionsCheck, contributorsTypeCheck ], allow_type_to_enforce_institution )", "= r2.outcome[idx] is not None if dups == 0: return 1 else: return", "\"LanguageSpecifiedCheck\": return True # Version is optional if rdp.metadata.version is None: for c", "None: for c in e.checks: if type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors", "pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome contributorTypes = 
checks[1].get_last_result(pid).outcome for idx, inst", "self.add_evaluation(TrueEvaluation([subjectsHaveDdcCheck])) self.add_evaluation(TrueEvaluation([subjectsHaveWikidataKeywordsCheck])) # CONTRIBUTOR contributorsOrcidCheck = ContributorsOrcidCheck() contributorsFamilyAndGivenNameCheck = ContributorsFamilyAndGivenNameCheck() contributorsContainInstitutionsCheck = ContributorsContainInstitutionsCheck()", "elif \"RightsHolder\" in contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation( [rightsAreOpenCheck, contributorsTypeCheck],", "subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck]) ) self.add_evaluation( IsBetweenEvaluation([subjectsNumberCheck], 1, sys.float_info.max)", "\"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\", \"RightsHolder\", \"WorkPackageLeader\" ], ) ) # DATES datesTypeCheck", "Copyright: <NAME> 2020 # # Apache 2.0 License # # This file contains", "checks[1].get_last_result(pid).outcome newTotal = isInstitution.count(False) for idx, inst in enumerate(isInstitution): if inst: continue if", "[ contributorsContainInstitutionsCheck, contributorsFamilyAndGivenNameCheck ], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0", "SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck()", "if type(c).__name__ == \"VersionSpecifiedCheck\": return True # Contributors are optional if len(rdp.metadata.contributors) ==", "for c in e.checks] == [\"RightsAreOpenCheck\", \"ContributorsTypeCheck\"]: return False for c in e.checks:", "\\ CreatorsContainInstitutionsCheck, \\ ContributorsOrcidCheck, \\ 
ContributorsFamilyAndGivenNameCheck, \\ ContributorsContainInstitutionsCheck, \\ ContributorsTypeCheck, \\ DatesInformationCheck, \\", "None: dups += 1 if dateTypesHasInformation[t] and r2.outcome[idx] is not None: dupsWithInfo +=", "= RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [ \"Describes\", \"IsDescribedBy\", \"HasPart\",", "is inspired by the DataCite best practice guide (DOI: 10.5281/zenodo.3559799) \"\"\" def __init__(self,", "if c.name.startswith(\"Contributors\"): return True # Related Resources are optional if len(rdp.metadata.relatedResources) == 0:", "= DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def publishedEqualsIssued(checks, pid): return checks[0].get_last_result(pid).outcome \\", "= RightsHasAtLeastOneLicenseCheck() rightsHaveValidSPDXIdentifierCheck = RightsHaveValidSPDXIdentifierCheck() rightsAreOpenCheck = RightsAreOpenCheck() self.add_evaluation(TrueEvaluation([rightsHasAtLeastOneLicenseCheck])) self.add_evaluation( TheMoreTrueTheBetterEvaluation([rightsHaveValidSPDXIdentifierCheck]) ) #", "skip(e: Evaluation, rdp: Rdp) -> bool: # Evaluating language make no sense for", ") self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None] ) ) # MIXED def", "PublicationYearCheck() datesIssuedYearCheck = DatesIssuedYearCheck() datesInformationCheck = DatesInformationCheck() self.add_evaluation( ContainsAtLeastOneEvaluation([datesTypeCheck], [\"Created\", \"Collected\"]) ) def", "Rdp) -> bool: # Evaluating language make no sense for these types if", "0: return 1 else: return dupsWithInfo/dups self.add_evaluation( FunctionEvaluation( [datesTypeCheck, datesInformationCheck], duplicatesHaveInformation ) )", "self.add_evaluation( 
IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 ) ) # FORMAT", "= DescriptionsLanguageCheck() descriptionsTypeCheck = DescriptionsTypeCheck() self.add_evaluation( IsBetweenEvaluation( [descriptionsNumberCheck], 1, sys.float_info.max ) ) self.add_evaluation(", "[descriptionsLanguageCheck], [\"en\"] ) ) self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck],", "== 0: # if the license is non-open, we need to check Rightsholder!", "= SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation( TheMoreTrueTheBetterEvaluation([subjectsAreQualifiedCheck])", "DataCite best practice guide benchmark # # Basis for this benchmark: 10.5281/zenodo.3559800 #", "\"IsIdenticalTo\", \"IsSupplementTo\", \"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck = SizesNumberCheck()", ") ) self.add_evaluation( InListEvaluation( [contributorsTypeCheck], [\"ContactPerson\", \"DataCollector\", \"DataCurator\", \"HostingInstitution\", \"ProjectLeader\", \"ProjectManager\", \"ProjectMember\", \"Researcher\",", "Related Resources are optional if len(rdp.metadata.relatedResources) == 0: for c in e.checks: if", "e.checks: if type(c).__name__ == \"LanguageSpecifiedCheck\": return True # Version is optional if rdp.metadata.version", "self.add_evaluation( ContainsAllEvaluation( [descriptionsTypeCheck], [\"Abstract\"] ) ) self.add_evaluation( DoesNotContainEvaluation( [descriptionsTypeCheck], [\"SeriesInformation\", \"TableOfContents\", \"Other\", None]", "self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsOrcidCheck]) ) self.add_evaluation( TheMoreTrueTheBetterEvaluation([creatorsFamilyAndGivenNameCheck]) ) 
self.add_evaluation( TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck =", ") # LANGUAGE languageSpecifiedCheck = LanguageSpecifiedCheck() self.add_evaluation(TrueEvaluation([languageSpecifiedCheck])) # RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck()", "doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR creatorsOrcidCheck = CreatorsOrcidCheck() creatorsFamilyAndGivenNameCheck = CreatorsFamilyAndGivenNameCheck()", "sizesByteSizeCheck = SizesByteSizeCheck() self.add_evaluation( IsIdenticalToEvaluation([sizesNumberCheck], 1) ) self.add_evaluation( ContainsItemExactlyNTimesEvaluation( [sizesByteSizeCheck], True, 1 )", "Evaluation, rdp: Rdp) -> bool: # Evaluating language make no sense for these", "# This file contains code related to the DataCite best practice guide benchmark", "\"Best Practice Benchmark\") self.skip = skip self.version = \"0.0.1\" self.id = \"BPG\" #", "\"IsSupplementedBy\", \"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck =", "# SUBJECT subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck =", "], allow_person_related_tests_to_be_skipped ) ) def allow_type_to_enforce_institution(checks, pid): evaluation = 0 isInstitution = checks[0].get_last_result(pid).outcome", "e.checks: if type(c).__name__.startswith(\"RelatedResource\"): return True return False class BPGBenchmark(Benchmark): \"\"\" This benchmark is", "contributorsType: return 1 else: return 0 self.add_evaluation( FunctionEvaluation( [rightsAreOpenCheck, contributorsTypeCheck], rightsHolderIfRightsClosed ) )", "in e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources are optional if 
len(rdp.metadata.relatedResources)", "10.5281/zenodo.3559799) \"\"\" def __init__(self, name=None): Benchmark.__init__(self, \"Best Practice Benchmark\") self.skip = skip self.version", "\"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck() self.add_evaluation(TrueEvaluation([isValidDoiCheck])) self.add_evaluation(TrueEvaluation([doiResolvesCheck])) # CREATOR", "TheMoreFalseTheBetterEvaluation([creatorsContainInstitutionsCheck]) ) # TITLE titlesJustAFileNameCheck = TitlesJustAFileNameCheck() titlesTypeCheck = TitlesTypeCheck() titlesLanguageCheck = TitlesLanguageCheck()", "inst in enumerate(isInstitution): if inst: continue if booleanCheckResult[idx]: evaluation += 1/newTotal return evaluation", "RELATED RESOURCES relatedResourceMetadataCheck = RelatedResourceMetadataCheck() relatedResourceTypeCheck = RelatedResourceTypeCheck() self.add_evaluation(TrueEvaluation([relatedResourceMetadataCheck])) self.add_evaluation( InListEvaluation( [relatedResourceTypeCheck], [", "= checks[1].get_last_result(pid).outcome if rightsAreOpen: return 1 elif \"RightsHolder\" in contributorsType: return 1 else:", "breadp.benchmarks import Benchmark from breadp.checks.pid import IsValidDoiCheck, DoiResolvesCheck from breadp.checks.metadata import \\ CreatorsOrcidCheck,", "\"Documents\", \"IsDocumentedBy\" ] ) ) # SIZE sizesNumberCheck = SizesNumberCheck() sizesByteSizeCheck = SizesByteSizeCheck()", "subjectsAreQualifiedCheck = SubjectsAreQualifiedCheck() subjectsNumberCheck = SubjectsNumberCheck() subjectsHaveDdcCheck = SubjectsHaveDdcCheck() subjectsHaveWikidataKeywordsCheck = SubjectsHaveWikidataKeywordsCheck() self.add_evaluation(", "= \"0.0.1\" self.id = \"BPG\" # PID isValidDoiCheck = IsValidDoiCheck() doiResolvesCheck = DoiResolvesCheck()", "False for c in e.checks: if c.name.startswith(\"Contributors\"): return True # Related Resources are", "len(rdp.metadata.relatedResources) == 0: for c in e.checks: if 
type(c).__name__.startswith(\"RelatedResource\"): return True return False" ]
[ "else: if length > 0: word_table[length] = 1 print(\"Length Count\") for key in", "filename as input and displays the count for each word length, ignoring punctuation", "= len(stripped) if length in word_table.keys(): word_table[length] += 1 else: if length >", "0: word_table[length] = 1 print(\"Length Count\") for key in word_table: print(\"{0} {1}\".format(key, word_table[key]))", "]\", \"\", word) length = len(stripped) if length in word_table.keys(): word_table[length] += 1", "\"\", word) length = len(stripped) if length in word_table.keys(): word_table[length] += 1 else:", "len(stripped) if length in word_table.keys(): word_table[length] += 1 else: if length > 0:", "+= 1 else: if length > 0: word_table[length] = 1 print(\"Length Count\") for", "word_table[length] += 1 else: if length > 0: word_table[length] = 1 print(\"Length Count\")", "if length in word_table.keys(): word_table[length] += 1 else: if length > 0: word_table[length]", "program takes a filename as input and displays the count for each word", "count for each word length, ignoring punctuation and non-alphanumeric characters. \"\"\" import re,", "for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped)", "in f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length", "1 else: if length > 0: word_table[length] = 1 print(\"Length Count\") for key", "length, ignoring punctuation and non-alphanumeric characters. \"\"\" import re, sys word_table = {}", "takes a filename as input and displays the count for each word length,", "re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if length in word_table.keys(): word_table[length] +=", "for each word length, ignoring punctuation and non-alphanumeric characters. \"\"\" import re, sys", "non-alphanumeric characters. 
\"\"\" import re, sys word_table = {} with open(sys.argv[1]) as f:", "\"\"\" import re, sys word_table = {} with open(sys.argv[1]) as f: for line", "f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length =", "in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if length", "input and displays the count for each word length, ignoring punctuation and non-alphanumeric", "word) length = len(stripped) if length in word_table.keys(): word_table[length] += 1 else: if", "a filename as input and displays the count for each word length, ignoring", "characters. \"\"\" import re, sys word_table = {} with open(sys.argv[1]) as f: for", "This program takes a filename as input and displays the count for each", "and non-alphanumeric characters. \"\"\" import re, sys word_table = {} with open(sys.argv[1]) as", "word_table = {} with open(sys.argv[1]) as f: for line in f: for word", "stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if length in word_table.keys():", "if length > 0: word_table[length] = 1 print(\"Length Count\") for key in word_table:", "as f: for line in f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z", "sys word_table = {} with open(sys.argv[1]) as f: for line in f: for", "word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if", "= re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if length in word_table.keys(): word_table[length]", "\"\"\" This program takes a filename as input and displays the count for", "each word length, ignoring punctuation and non-alphanumeric characters. \"\"\" import re, sys word_table", "open(sys.argv[1]) as f: for line in f: for word in line.split(): stripped =", "length = len(stripped) if length in word_table.keys(): word_table[length] += 1 else: if length", "as input and displays the count for each word length, ignoring punctuation and", "ignoring punctuation and non-alphanumeric characters. 
\"\"\" import re, sys word_table = {} with", "line in f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word)", "> 0: word_table[length] = 1 print(\"Length Count\") for key in word_table: print(\"{0} {1}\".format(key,", "= {} with open(sys.argv[1]) as f: for line in f: for word in", "line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\", word) length = len(stripped) if length in", "punctuation and non-alphanumeric characters. \"\"\" import re, sys word_table = {} with open(sys.argv[1])", "displays the count for each word length, ignoring punctuation and non-alphanumeric characters. \"\"\"", "re, sys word_table = {} with open(sys.argv[1]) as f: for line in f:", "length in word_table.keys(): word_table[length] += 1 else: if length > 0: word_table[length] =", "import re, sys word_table = {} with open(sys.argv[1]) as f: for line in", "the count for each word length, ignoring punctuation and non-alphanumeric characters. \"\"\" import", "with open(sys.argv[1]) as f: for line in f: for word in line.split(): stripped", "length > 0: word_table[length] = 1 print(\"Length Count\") for key in word_table: print(\"{0}", "word length, ignoring punctuation and non-alphanumeric characters. \"\"\" import re, sys word_table =", "in word_table.keys(): word_table[length] += 1 else: if length > 0: word_table[length] = 1", "for line in f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\", \"\",", "word_table.keys(): word_table[length] += 1 else: if length > 0: word_table[length] = 1 print(\"Length", "and displays the count for each word length, ignoring punctuation and non-alphanumeric characters.", "f: for line in f: for word in line.split(): stripped = re.sub(\"[^a-zA-Z ]\",", "{} with open(sys.argv[1]) as f: for line in f: for word in line.split():", "#!/usr/local/bin/python3 \"\"\" This program takes a filename as input and displays the count" ]
[ "range(10): sj = str(j) checks += 1 if encode(sk+si+si) == encode(sk+sj+sj): errors +=", "unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase):", "+= 1 if encode(16) == encode(60): errors += 1 if encode(17) == encode(70):", "changes the check-digit.\"\"\" for i in range(10): si = str(i) for j in", "1 return errors / float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors", "return errors / float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks =", "== encode(60): errors += 1 if encode(17) == encode(70): errors += 1 if", "encode(60): errors += 1 if encode(17) == encode(70): errors += 1 if encode(18)", "self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single digit changes", "l in range(10): sl = str(l) for a in range(10): sa = str(a)", "that a single digit switch changes the result.\"\"\" for i in range(9): si", "str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors", "count_twins(): \"\"\"Count the number of twin errors missed (aa -> bb).\"\"\" checks =", "range(a+1,10): sb = str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors +=", "+= 1 return errors / float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\"", "str(j) checks += 1 if encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors", "encode(sk+sb+sl+sa): errors += 1 return errors / float(checks) def count_jump_twins(): \"\"\" Xaka ->", "encode(50): errors += 1 if encode(16) == encode(60): errors += 1 if encode(17)", "encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors / float(checks) def count_jump_switch(): \"\"\"", "self.assertNotEqual(encode(si+sj), encode(sj+si)) def 
test_single_switch_offset(self): \"\"\"Test that a single digit switch changes the result,", "range(10): sl = str(l) for a in range(9): sa = str(a) for b", "== encode(sk+sb+sl+sa): errors += 1 return errors / float(checks) def count_jump_twins(): \"\"\" Xaka", "+= 1 if encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors / float(checks)", "in range(10): si = str(i) for j in range(10): sj = str(j) checks", "sk = str(k) for i in range(9): si = str(i) for j in", "/ float(checks) def count_twins(): \"\"\"Count the number of twin errors missed (aa ->", "1 if encode(17) == encode(70): errors += 1 if encode(18) == encode(80): errors", "in range(i+1, 10): sj = str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si):", "the result, no matter the digit before.\"\"\" for k in range(10): sk =", "DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single digit changes the check-digit.\"\"\" for", "encode(17) == encode(70): errors += 1 if encode(18) == encode(80): errors += 1", "encode(si+sj) == encode(si+sk): errors += 1 return errors / float(checks) def count_switches(): \"\"\"Count", "== encode(70): errors += 1 if encode(18) == encode(80): errors += 1 if", "j in range(i+1, 10): sj = str(j) checks += 1 if encode(sk+si+sj) ==", "b in range(a+1,10): sb = str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa):", "if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors / float(checks) def count_jump_twins():", "checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors /", "0 for k in range(10): sk = str(k) for i in range(10): si", "= str(i) for j in range(i+1, 10): sj = str(j) checks += 1", "= 7 errors = 0 if encode(13) == encode(30): errors += 1 if", "= str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single", "in range(10): sk = str(k) if j != k: checks += 1 if", "import 
unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class", "encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles():", "+= 1 if encode(19) == encode(90): errors += 1 return errors / float(checks)", "sb = str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors += 1", "= str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single digit switch changes", "1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors / float(checks) def", "PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of phonetic errors are caught by", "for k in range(10): sk = str(k) if j != k: self.assertNotEqual( encode(si+sj),", "errors missed (aa -> bb).\"\"\" checks = 0 errors = 0 for k", "-> bb).\"\"\" checks = 0 errors = 0 for k in range(10): sk", "changing a single digit changes the check-digit.\"\"\" for i in range(10): si =", "count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\" checks = 0 errors =", "class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def", "+= 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors / float(checks)", "by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17),", 
"encode(16) == encode(60): errors += 1 if encode(17) == encode(70): errors += 1", "that a range of phonetic errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13),", "errors / float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\" checks", "range(10): sk = str(k) for l in range(10): sl = str(l) for a", "-> cba \"\"\" checks = 0 errors = 0 for k in range(10):", "<reponame>jdmacleod/damm<filename>test_damm.py from damm import encode, check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572),", "for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test", "self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit switch", "sj = str(j) for k in range(10): sk = str(k) if j !=", "of phonetic errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40))", "def count_twins(): \"\"\"Count the number of twin errors missed (aa -> bb).\"\"\" checks", "test_phonetic(self): \"\"\"Check that a range of phonetic errors are caught by the check", "= str(k) for l in range(10): sl = str(l) for a in range(10):", "range(10): sb = str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors +=", "check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18),", "if encode(si+sj) == encode(si+sk): errors += 1 return errors / float(checks) def count_switches():", "switch changes the result, no matter the digit before.\"\"\" for k in range(10):", "errors += 1 return errors / 
float(checks) def count_twins(): \"\"\"Count the number of", "if encode(18) == encode(80): errors += 1 if encode(19) == encode(90): errors +=", "= str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class", "in range(10): sa = str(a) for b in range(10): sb = str(b) checks", "k in range(10): sk = str(k) if j != k: checks += 1", "errors += 1 return errors / float(checks) def count_switches(): \"\"\"Count the fraction of", "fraction of phonetic errors missed.\"\"\" checks = 7 errors = 0 if encode(13)", "/ float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks = 0 errors", "= str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return", "count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\" checks = 0 errors =", "range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that", "for k in range(10): sk = str(k) if j != k: checks +=", "single digit switch changes the result, no matter the digit before.\"\"\" for k", "= 0 for k in range(10): sk = str(k) for i in range(9):", "str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors += 1 return errors", "encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\" checks", "self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit", "for j in range(i+1, 10): sj = str(j) checks += 1 if encode(sk+si+sj)", "1 if encode(16) == encode(60): errors += 1 if encode(17) == encode(70): errors", "= str(k) for i in range(9): si = str(i) for j in range(i+1,", "errors missed.\"\"\" checks = 0 errors = 0 for k in range(10): sk", "= 0 for k in range(10): sk = str(k) for l in range(10):", "str(j) 
self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single digit switch changes the", "errors / float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks = 0", "/ float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\" checks =", "encode, check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9)", "in range(10): sj = str(j) checks += 1 if encode(sk+si+si) == encode(sk+sj+sj): errors", "1 if encode(19) == encode(90): errors += 1 return errors / float(checks) def", "\"\"\" Xaka -> Xbkb \"\"\" checks = 0 errors = 0 for k", "the check-digit.\"\"\" for i in range(10): si = str(i) for j in range(10):", "in range(a+1,10): sb = str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors", "return errors / float(checks) def count_twins(): \"\"\"Count the number of twin errors missed", "test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that", "the fraction of phonetic errors missed.\"\"\" checks = 7 errors = 0 if", "= str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors += 1 return", "encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return errors / float(checks) def count_jump_twins(): \"\"\"", "+= 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors += 1 return errors / float(checks)", "str(i) for j in range(i+1, 10): sj = str(j) checks += 1 if", "for j in range(10): sj = str(j) for k in range(10): sk =", "self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of phonetic", "= str(j) checks += 1 if encode(sk+si+si) 
== encode(sk+sj+sj): errors += 1 return", "GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self):", "digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80))", "encode(14) == encode(40): errors += 1 if encode(15) == encode(50): errors += 1", "== encode(50): errors += 1 if encode(16) == encode(60): errors += 1 if", "digit before.\"\"\" for k in range(10): sk = str(k) for i in range(9):", "range(10): sk = str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase):", "encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of phonetic errors", "= str(k) for i in range(10): si = str(i) for j in range(10):", "check-digit.\"\"\" for i in range(10): si = str(i) for j in range(10): sj", "= str(a) for b in range(10): sb = str(b) checks += 1 if", "a in range(9): sa = str(a) for b in range(a+1,10): sb = str(b)", "return errors / float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks =", "encode(90): errors += 1 return errors / float(checks) def count_twins(): \"\"\"Count the number", "if encode(13) == encode(30): errors += 1 if encode(14) == encode(40): errors +=", "of single-digit errors missed.\"\"\" checks = 0 errors = 0 for i in", "self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit", "range(10): sa = str(a) for b in range(10): sb = str(b) checks +=", "for i in range(9): si = 
str(i) for j in range(i+1, 10): sj", "10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing", "10): sj = str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors +=", "def test_simple_switch(self): \"\"\"Test that a single digit switch changes the result.\"\"\" for i", "for b in range(10): sb = str(b) checks += 1 if encode(sk+sa+sl+sa) ==", "a single digit switch changes the result.\"\"\" for i in range(9): si =", "sj = str(j) checks += 1 if encode(sk+si+si) == encode(sk+sj+sj): errors += 1", "count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\" checks = 7 errors =", "sk = str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def", "str(i) for j in range(10): sj = str(j) checks += 1 if encode(sk+si+si)", "count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks = 0 errors = 0 for", "missed (aa -> bb).\"\"\" checks = 0 errors = 0 for k in", "encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single digit switch changes the result, no", "self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit switch changes the", "switch changes the result.\"\"\" for i in range(9): si = str(i) for j", "a single digit switch changes the result, no matter the digit before.\"\"\" for", "k: checks += 1 if encode(si+sj) == encode(si+sk): errors += 1 return errors", "encode(30): errors += 1 if encode(14) == encode(40): errors += 1 if encode(15)", "sk = str(k) for i in range(10): si = str(i) for j in", "errors / float(checks) def count_twins(): \"\"\"Count the number of twin errors missed (aa", "= 0 for i in range(10): si = str(i) for j in range(10):", "k in range(10): sk = str(k) for i in range(10): si = str(i)", "!= k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): 
def test_phonetic(self): \"\"\"Check that a range", "range(9): sa = str(a) for b in range(a+1,10): sb = str(b) checks +=", "if encode(16) == encode(60): errors += 1 if encode(17) == encode(70): errors +=", "sk = str(k) if j != k: checks += 1 if encode(si+sj) ==", "j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that", "range(9): si = str(i) for j in range(i+1, 10): sj = str(j) checks", "missed.\"\"\" checks = 0 errors = 0 for i in range(10): si =", "\"\"\"Count the number of twin errors missed (aa -> bb).\"\"\" checks = 0", "1 return errors / float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks", "check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723))", "def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks = 0 errors = 0", "encode(19) == encode(90): errors += 1 return errors / float(checks) def count_twins(): \"\"\"Count", "range(9): si = str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj),", "return errors / float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\"", "a in range(10): sa = str(a) for b in range(10): sb = str(b)", "str(i) for j in range(10): sj = str(j) for k in range(10): sk", "class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of phonetic errors are caught", "number of twin errors missed (aa -> bb).\"\"\" checks = 0 errors =", "b in range(10): sb = str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb):", "abc -> cba \"\"\" checks = 0 errors = 0 for k in", "0 errors = 0 for k in range(10): sk = str(k) for i", "range(10): si = str(i) for j in range(10): sj = str(j) for k", "str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class 
PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check", "+= 1 if encode(15) == encode(50): errors += 1 if encode(16) == encode(60):", "import encode, check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'),", "j in range(10): sj = str(j) checks += 1 if encode(sk+si+si) == encode(sk+sj+sj):", "!= k: checks += 1 if encode(si+sj) == encode(si+sk): errors += 1 return", "range(10): sj = str(j) for k in range(10): sk = str(k) if j", "1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors / float(checks) def", "str(a) for b in range(a+1,10): sb = str(b) checks += 1 if encode(sk+sa+sl+sb)", "str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase):", "k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of", "+= 1 if encode(si+sj) == encode(si+sk): errors += 1 return errors / float(checks)", "the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70))", "0 for k in range(10): sk = str(k) for l in range(10): sl", "0 for k in range(10): sk = str(k) for i in range(9): si", "def test_simple_digit(self): \"\"\"Check that changing a single digit changes the check-digit.\"\"\" for i", "for a in range(9): sa = str(a) for b in range(a+1,10): sb =", "str(k) for i in range(10): si = str(i) for j in range(10): sj", "the number of twin errors missed (aa -> bb).\"\"\" checks = 0 errors", "+= 1 if encode(18) == encode(80): errors += 1 if encode(19) == encode(90):", "class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single digit 
changes the check-digit.\"\"\"", "j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self):", "= str(k) for l in range(10): sl = str(l) for a in range(9):", "1 if encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors / float(checks) def", "a range of phonetic errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30))", "SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit switch changes the result.\"\"\" for", "\"\"\"Test that a single digit switch changes the result.\"\"\" for i in range(9):", "in range(10): sk = str(k) for l in range(10): sl = str(l) for", "errors / float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\" checks", "digit switch changes the result.\"\"\" for i in range(9): si = str(i) for", "\"\"\"Count the fraction of phonetic errors missed.\"\"\" checks = 7 errors = 0", "k in range(10): sk = str(k) for l in range(10): sl = str(l)", "checks += 1 if encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors /", "the fraction of single-digit errors missed.\"\"\" checks = 0 errors = 0 for", "errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50))", "float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\" checks = 0", "def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test", "that a single digit switch changes the result, no matter the digit before.\"\"\"", "if encode(17) == encode(70): errors += 1 if encode(18) == encode(80): errors +=", "test_single_switch_offset(self): 
\"\"\"Test that a single digit switch changes the result, no matter the", "+= 1 if encode(14) == encode(40): errors += 1 if encode(15) == encode(50):", "the digit before.\"\"\" for k in range(10): sk = str(k) for i in", "str(k) for i in range(9): si = str(i) for j in range(i+1, 10):", "j in range(10): sj = str(j) for k in range(10): sk = str(k)", "are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16),", "twin errors missed (aa -> bb).\"\"\" checks = 0 errors = 0 for", "float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\" checks = 7", "in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a", "encode(70): errors += 1 if encode(18) == encode(80): errors += 1 if encode(19)", "from damm import encode, check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4)", "0 errors = 0 for k in range(10): sk = str(k) for l", "encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a range of phonetic errors are", "result.\"\"\" for i in range(9): si = str(i) for j in range(i+1, 10):", "matter the digit before.\"\"\" for k in range(10): sk = str(k) for i", "range(10): si = str(i) for j in range(10): sj = str(j) checks +=", "self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction", "== encode(90): errors += 1 return errors / float(checks) def count_twins(): \"\"\"Count the", "encode(si+sk): errors += 1 return errors / float(checks) def count_switches(): \"\"\"Count the fraction", "no matter the digit before.\"\"\" for k in range(10): sk = str(k) 
for", "errors += 1 return errors / float(checks) def count_jump_switch(): \"\"\" abc -> cba", "/ float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks = 0 errors", "1 return errors / float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks", "= str(l) for a in range(10): sa = str(a) for b in range(10):", "errors = 0 for k in range(10): sk = str(k) for l in", "encode(80): errors += 1 if encode(19) == encode(90): errors += 1 return errors", "self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\"", "= str(i) for j in range(10): sj = str(j) checks += 1 if", "caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60))", "str(a) for b in range(10): sb = str(b) checks += 1 if encode(sk+sa+sl+sa)", "class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit switch changes the result.\"\"\"", "for b in range(a+1,10): sb = str(b) checks += 1 if encode(sk+sa+sl+sb) ==", "\"\"\"Count the fraction of single-digit errors missed.\"\"\" checks = 0 errors = 0", "encode(sk+sj+si): errors += 1 return errors / float(checks) def count_phonetics(): \"\"\"Count the fraction", "sa = str(a) for b in range(10): sb = str(b) checks += 1", "adjacent-digit-switch errors missed.\"\"\" checks = 0 errors = 0 for k in range(10):", "0 errors = 0 for i in range(10): si = str(i) for j", "encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit errors", "encode(15) == encode(50): errors += 1 if encode(16) == encode(60): errors += 1", "if encode(15) == encode(50): errors += 1 if encode(16) == encode(60): errors +=", "digit switch changes the result, no matter the digit 
before.\"\"\" for k in", "1 return errors / float(checks) def count_twins(): \"\"\"Count the number of twin errors", "encode(18) == encode(80): errors += 1 if encode(19) == encode(90): errors += 1", "== encode(30): errors += 1 if encode(14) == encode(40): errors += 1 if", "4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single", "def count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\" checks = 0 errors", "1 return errors / float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic errors", "si = str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si))", "sl = str(l) for a in range(9): sa = str(a) for b in", "si = str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si))", "changes the result.\"\"\" for i in range(9): si = str(i) for j in", "errors = 0 if encode(13) == encode(30): errors += 1 if encode(14) ==", "encode(sk+sj+sj): errors += 1 return errors / float(checks) def count_jump_switch(): \"\"\" abc ->", "str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single digit", "def test_phonetic(self): \"\"\"Check that a range of phonetic errors are caught by the", "= str(i) for j in range(10): sj = str(j) for k in range(10):", "missed.\"\"\" checks = 0 errors = 0 for k in range(10): sk =", "in range(9): sa = str(a) for b in range(a+1,10): sb = str(b) checks", "= 0 for k in range(10): sk = str(k) for i in range(10):", "fraction of single-digit errors missed.\"\"\" checks = 0 errors = 0 for i", "def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\" checks = 0 errors", "str(l) for a in range(10): sa = str(a) for b in range(10): sb", "encode(50)) 
self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the", "damm import encode, check import unittest class GeneralTest(unittest.TestCase): def test_some_known_numbers(self): self.assertEqual(encode(572), 4) self.assertTrue(check(5724))", "== encode(80): errors += 1 if encode(19) == encode(90): errors += 1 return", "if encode(19) == encode(90): errors += 1 return errors / float(checks) def count_twins():", "checks += 1 if encode(si+sj) == encode(si+sk): errors += 1 return errors /", "range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single", "errors / float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks = 0", "= 0 errors = 0 for i in range(10): si = str(i) for", "1 if encode(si+sj) == encode(si+sk): errors += 1 return errors / float(checks) def", "si = str(i) for j in range(10): sj = str(j) checks += 1", "= str(k) if j != k: checks += 1 if encode(si+sj) == encode(si+sk):", "-> Xbkb \"\"\" checks = 0 errors = 0 for k in range(10):", "sk = str(k) for l in range(10): sl = str(l) for a in", "range(10): sl = str(l) for a in range(10): sa = str(a) for b", "float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\" checks = 0 errors =", "str(k) if j != k: checks += 1 if encode(si+sj) == encode(si+sk): errors", "sj = str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1", "sa = str(a) for b in range(a+1,10): sb = str(b) checks += 1", "range(10): sk = str(k) if j != k: checks += 1 if encode(si+sj)", "for a in range(10): sa = str(a) for b in range(10): sb =", "for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def", "def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks = 0 errors = 0", "encode(60)) 
self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of", "j != k: checks += 1 if encode(si+sj) == encode(si+sk): errors += 1", "encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90))", "range(10): sk = str(k) for i in range(10): si = str(i) for j", "sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single digit switch", "+= 1 if encode(17) == encode(70): errors += 1 if encode(18) == encode(80):", "+= 1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors / float(checks)", "sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a", "k in range(10): sk = str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk))", "Xaka -> Xbkb \"\"\" checks = 0 errors = 0 for k in", "self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def", "str(j) for k in range(10): sk = str(k) if j != k: self.assertNotEqual(", "of phonetic errors missed.\"\"\" checks = 7 errors = 0 if encode(13) ==", "for l in range(10): sl = str(l) for a in range(10): sa =", "errors += 1 if encode(17) == encode(70): errors += 1 if encode(18) ==", "the result.\"\"\" for i in range(9): si = str(i) for j in range(i+1,", "in range(10): sk = str(k) for i in range(10): si = str(i) for", "in range(10): sl = str(l) for a in range(10): sa = str(a) for", "0 if encode(13) == 
encode(30): errors += 1 if encode(14) == encode(40): errors", "count_jump_switch(): \"\"\" abc -> cba \"\"\" checks = 0 errors = 0 for", "checks = 0 errors = 0 for i in range(10): si = str(i)", "si = str(i) for j in range(i+1, 10): sj = str(j) checks +=", "+= 1 return errors / float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch", "cba \"\"\" checks = 0 errors = 0 for k in range(10): sk", "== encode(40): errors += 1 if encode(15) == encode(50): errors += 1 if", "str(k) for l in range(10): sl = str(l) for a in range(10): sa", "si = str(i) for j in range(10): sj = str(j) for k in", "if encode(sk+si+si) == encode(sk+sj+sj): errors += 1 return errors / float(checks) def count_jump_switch():", "/ float(checks) def count_switches(): \"\"\"Count the fraction of adjacent-digit-switch errors missed.\"\"\" checks =", "errors += 1 if encode(15) == encode(50): errors += 1 if encode(16) ==", "= 0 if encode(13) == encode(30): errors += 1 if encode(14) == encode(40):", "in range(10): si = str(i) for j in range(10): sj = str(j) for", "self.assertEqual(encode(572), 4) self.assertTrue(check(5724)) self.assertEqual(encode('43881234567'), 9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a", "encode(13) == encode(30): errors += 1 if encode(14) == encode(40): errors += 1", "== encode(si+sk): errors += 1 return errors / float(checks) def count_switches(): \"\"\"Count the", "float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb \"\"\" checks = 0 errors =", "str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self):", "for l in range(10): sl = str(l) for a in range(9): sa =", "str(j) for k in range(10): sk = str(k) if j != k: checks", "if j != k: checks += 1 if encode(si+sj) == encode(si+sk): errors +=", "phonetic errors missed.\"\"\" checks = 7 errors = 0 if encode(13) == encode(30):", "\"\"\"Count the fraction of 
adjacent-digit-switch errors missed.\"\"\" checks = 0 errors = 0", "test_simple_digit(self): \"\"\"Check that changing a single digit changes the check-digit.\"\"\" for i in", "encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\" checks = 0", "= 0 errors = 0 for k in range(10): sk = str(k) for", "in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj), encode(sk+sj+si)) class DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check", "in range(10): sj = str(j) for k in range(10): sk = str(k) if", "1 if encode(18) == encode(80): errors += 1 if encode(19) == encode(90): errors", "errors += 1 if encode(18) == encode(80): errors += 1 if encode(19) ==", "j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that a", "single-digit errors missed.\"\"\" checks = 0 errors = 0 for i in range(10):", "result, no matter the digit before.\"\"\" for k in range(10): sk = str(k)", "checks = 7 errors = 0 if encode(13) == encode(30): errors += 1", "self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count", "checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors /", "i in range(10): si = str(i) for j in range(10): sj = str(j)", "range of phonetic errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14),", "single digit switch changes the result.\"\"\" for i in range(9): si = str(i)", "l in range(10): sl = str(l) for a in range(9): sa = str(a)", "== encode(sk+sj+si): errors += 1 return errors / float(checks) def count_phonetics(): \"\"\"Count the", "in range(10): sk = str(k) for i in range(9): si = str(i) for", "encode(sk+sj+si)) class 
DigitTest(unittest.TestCase): def test_simple_digit(self): \"\"\"Check that changing a single digit changes the", "for k in range(10): sk = str(k) for i in range(10): si =", "bb).\"\"\" checks = 0 errors = 0 for k in range(10): sk =", "range(10): sk = str(k) for i in range(9): si = str(i) for j", "10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def test_single_switch_offset(self): \"\"\"Test that a single digit", "if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors / float(checks) def count_phonetics():", "0 for i in range(10): si = str(i) for j in range(10): sj", "Xbkb \"\"\" checks = 0 errors = 0 for k in range(10): sk", "test_simple_switch(self): \"\"\"Test that a single digit switch changes the result.\"\"\" for i in", "def count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\" checks = 7 errors", "= str(j) for k in range(10): sk = str(k) if j != k:", "7 errors = 0 if encode(13) == encode(30): errors += 1 if encode(14)", "errors missed.\"\"\" checks = 0 errors = 0 for i in range(10): si", "that changing a single digit changes the check-digit.\"\"\" for i in range(10): si", "= str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1 return", "errors missed.\"\"\" checks = 7 errors = 0 if encode(13) == encode(30): errors", "+= 1 return errors / float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic", "for j in range(10): sj = str(j) checks += 1 if encode(sk+si+si) ==", "\"\"\"Check that a range of phonetic errors are caught by the check digit.\"\"\"", "encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors / float(checks) def count_phonetics(): \"\"\"Count", "in range(9): si = str(i) for j in range(i+1, 10): sj = str(j)", "1 if encode(15) == encode(50): errors += 1 if encode(16) == encode(60): errors", "encode(40): errors += 1 if encode(15) == encode(50): errors += 1 if encode(16)", "def test_single_switch_offset(self): \"\"\"Test that a single digit switch 
changes the result, no matter", "for i in range(10): si = str(i) for j in range(10): sj =", "of twin errors missed (aa -> bb).\"\"\" checks = 0 errors = 0", "\"\"\"Test that a single digit switch changes the result, no matter the digit", "sb = str(b) checks += 1 if encode(sk+sa+sl+sb) == encode(sk+sb+sl+sa): errors += 1", "before.\"\"\" for k in range(10): sk = str(k) for i in range(9): si", "errors += 1 return errors / float(checks) def count_jump_twins(): \"\"\" Xaka -> Xbkb", "errors += 1 return errors / float(checks) def count_phonetics(): \"\"\"Count the fraction of", "(aa -> bb).\"\"\" checks = 0 errors = 0 for k in range(10):", "= str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(si+sj), encode(sj+si)) def", "for k in range(10): sk = str(k) for i in range(9): si =", "k in range(10): sk = str(k) for i in range(9): si = str(i)", "errors = 0 for i in range(10): si = str(i) for j in", "+= 1 return errors / float(checks) def count_twins(): \"\"\"Count the number of twin", "in range(10): sk = str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class", "phonetic errors are caught by the check digit.\"\"\" self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15),", "checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors += 1 return errors /", "changes the result, no matter the digit before.\"\"\" for k in range(10): sk", "range(i+1, 10): sj = str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors", "missed.\"\"\" checks = 7 errors = 0 if encode(13) == encode(30): errors +=", "errors += 1 if encode(16) == encode(60): errors += 1 if encode(17) ==", "str(k) for l in range(10): sl = str(l) for a in range(9): sa", "fraction of adjacent-digit-switch errors missed.\"\"\" checks = 0 errors = 0 for k", "\"\"\" checks = 0 errors = 0 for k in range(10): sk =", "== encode(sk+sj+sj): errors += 1 return errors / float(checks) def count_jump_switch(): \"\"\" 
abc", "i in range(9): si = str(i) for j in range(i+1, 10): sj =", "digit changes the check-digit.\"\"\" for i in range(10): si = str(i) for j", "= str(l) for a in range(9): sa = str(a) for b in range(a+1,10):", "\"\"\" abc -> cba \"\"\" checks = 0 errors = 0 for k", "float(checks) def count_twins(): \"\"\"Count the number of twin errors missed (aa -> bb).\"\"\"", "1 if encode(14) == encode(40): errors += 1 if encode(15) == encode(50): errors", "+= 1 return errors / float(checks) def count_jump_switch(): \"\"\" abc -> cba \"\"\"", "str(j) checks += 1 if encode(sk+si+sj) == encode(sk+sj+si): errors += 1 return errors", "for k in range(10): sk = str(k) for l in range(10): sl =", "in range(10): sl = str(l) for a in range(9): sa = str(a) for", "range(9): si = str(i) for j in range(i+1, 10): sj = str(j) self.assertNotEqual(encode(sk+si+sj),", "in range(10): sb = str(b) checks += 1 if encode(sk+sa+sl+sa) == encode(sk+sb+sl+sb): errors", "= str(a) for b in range(a+1,10): sb = str(b) checks += 1 if", "if encode(14) == encode(40): errors += 1 if encode(15) == encode(50): errors +=", "str(l) for a in range(9): sa = str(a) for b in range(a+1,10): sb", "of adjacent-digit-switch errors missed.\"\"\" checks = 0 errors = 0 for k in", "sl = str(l) for a in range(10): sa = str(a) for b in", "errors += 1 if encode(14) == encode(40): errors += 1 if encode(15) ==", "self.assertNotEqual(encode(13), encode(30)) self.assertNotEqual(encode(14), encode(40)) self.assertNotEqual(encode(15), encode(50)) self.assertNotEqual(encode(16), encode(60)) self.assertNotEqual(encode(17), encode(70)) self.assertNotEqual(encode(18), encode(80)) self.assertNotEqual(encode(19),", "9) self.assertFalse(check(5723)) class SwitchTest(unittest.TestCase): def test_simple_switch(self): \"\"\"Test that a single digit switch changes", "if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self): \"\"\"Check that", "single digit changes the 
check-digit.\"\"\" for i in range(10): si = str(i) for", "self.assertNotEqual(encode(19), encode(90)) def count_singles(): \"\"\"Count the fraction of single-digit errors missed.\"\"\" checks =", "= str(k) if j != k: self.assertNotEqual( encode(si+sj), encode(si+sk)) class PhoneticTest(unittest.TestCase): def test_phonetic(self):", "checks = 0 errors = 0 for k in range(10): sk = str(k)", "errors = 0 for k in range(10): sk = str(k) for i in", "the fraction of adjacent-digit-switch errors missed.\"\"\" checks = 0 errors = 0 for", "errors += 1 if encode(19) == encode(90): errors += 1 return errors /", "\"\"\"Check that changing a single digit changes the check-digit.\"\"\" for i in range(10):", "a single digit changes the check-digit.\"\"\" for i in range(10): si = str(i)", "return errors / float(checks) def count_phonetics(): \"\"\"Count the fraction of phonetic errors missed.\"\"\"" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "/ 2} def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\"", "+ spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str,", "KIND, either express or implied. # See the License for the specific language", "length\") metric_fn = accuracy if task_name == 'cola': metric_fn = mcc elif task_name", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int],", "License. # You may obtain a copy of the License at # #", "{\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr", "rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "len(preds) != len(labels): raise ValueError(\"Predictions and labels must have the same length\") metric_fn", "(pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int], labels: List[int]) ->", "= pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear", "sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return", "metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn = pearson_and_spearman return metric_fn(preds, labels)", "law or agreed to in writing, software # distributed under the License is", "mcc elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b':", "the License for the specific language governing permissions and # limitations under the", "preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds,", "compliance with the License. # You may obtain a copy of the License", "the specific language governing permissions and # limitations under the License. from typing", "= ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()} def", "Language Team Authors and # The HuggingFace Inc. team. 
# Copyright (c) 2020,", "__all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()}", "from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]):", "predictions labels: golden labels Returns: metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions", "labels: List[int]): accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\":", "preds: model predictions labels: golden labels Returns: metrics \"\"\" if len(preds) != len(labels):", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr", "= spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr)", "this file except in compliance with the License. # You may obtain a", "metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and labels must have the", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0]", "labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr", "you may not use this file except in compliance with the License. #", "Dict[str, float]: \"\"\" Computes metrics for GLUE tasks Args: task_name: GLUE task name", "elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn", "# Copyright 2018 The Google AI Language Team Authors and # The HuggingFace", "2020, <NAME>. All rights reserved. 
# # Licensed under the Apache License, Version", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "task_name == 'cola': metric_fn = mcc elif task_name in ['mrpc', 'qqp']: metric_fn =", "must have the same length\") metric_fn = accuracy if task_name == 'cola': metric_fn", "# The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All rights reserved.", "ANY KIND, either express or implied. # See the License for the specific", "len(labels): raise ValueError(\"Predictions and labels must have the same length\") metric_fn = accuracy", "the License. from typing import Dict, List from scipy.stats import pearsonr, spearmanr from", "Computes metrics for GLUE tasks Args: task_name: GLUE task name preds: model predictions", "['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn = pearson_and_spearman return", "in compliance with the License. # You may obtain a copy of the", "Copyright 2018 The Google AI Language Team Authors and # The HuggingFace Inc.", "labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics for GLUE tasks Args: task_name:", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "== 'cola': metric_fn = mcc elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn = pearson_and_spearman return metric_fn(preds,", "== labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds:", "under the License. 
from typing import Dict, List from scipy.stats import pearsonr, spearmanr", "(preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def", "See the License for the specific language governing permissions and # limitations under", "{\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds ==", "labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2}", "labels must have the same length\") metric_fn = accuracy if task_name == 'cola':", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "License, Version 2.0 (the \"License\"); # you may not use this file except", "labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds == labels).mean() f1 =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "raise ValueError(\"Predictions and labels must have the same length\") metric_fn = accuracy if", "def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int],", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "language governing permissions and # limitations under the License. 
from typing import Dict,", "pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "\"f1\": f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds:", "from typing import Dict, List from scipy.stats import pearsonr, spearmanr from sklearn.metrics import", "the same length\") metric_fn = accuracy if task_name == 'cola': metric_fn = mcc", "labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr,", "accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels:", "return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels,", "float]: \"\"\" Computes metrics for GLUE tasks Args: task_name: GLUE task name preds:", "OF ANY KIND, either express or implied. # See the License for the", "License. 
from typing import Dict, List from scipy.stats import pearsonr, spearmanr from sklearn.metrics", "spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr +", "spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels:", "2.0 (the \"License\"); # you may not use this file except in compliance", "accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def", "preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics for GLUE tasks", "List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics for GLUE tasks Args:", "-> Dict[str, float]: \"\"\" Computes metrics for GLUE tasks Args: task_name: GLUE task", "Google AI Language Team Authors and # The HuggingFace Inc. team. # Copyright", "team. # Copyright (c) 2020, <NAME>. All rights reserved. # # Licensed under", "# you may not use this file except in compliance with the License.", "labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr =", "f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels:", "'cola': metric_fn = mcc elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif", "metric_fn = mcc elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name", "# limitations under the License. from typing import Dict, List from scipy.stats import", "agreed to in writing, software # distributed under the License is distributed on", "mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]):", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "permissions and # limitations under the License. from typing import Dict, List from", "List[int]): accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy,", "and # The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All rights", "List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]):", "(the \"License\"); # you may not use this file except in compliance with", "acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds)", "Copyright (c) 2020, <NAME>. All rights reserved. # # Licensed under the Apache", "Args: task_name: GLUE task name preds: model predictions labels: golden labels Returns: metrics", "compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics for", "same length\") metric_fn = accuracy if task_name == 'cola': metric_fn = mcc elif", "# # Unless required by applicable law or agreed to in writing, software", "2018 The Google AI Language Team Authors and # The HuggingFace Inc. team.", "\"\"\" Computes metrics for GLUE tasks Args: task_name: GLUE task name preds: model", "express or implied. # See the License for the specific language governing permissions", "List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\":", "Inc. team. # Copyright (c) 2020, <NAME>. All rights reserved. # # Licensed", "labels: List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "for the specific language governing permissions and # limitations under the License. from", "except in compliance with the License. 
# You may obtain a copy of", "model predictions labels: golden labels Returns: metrics \"\"\" if len(preds) != len(labels): raise", "accuracy if task_name == 'cola': metric_fn = mcc elif task_name in ['mrpc', 'qqp']:", "by applicable law or agreed to in writing, software # distributed under the", "specific language governing permissions and # limitations under the License. from typing import", "from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics']", "Dict, List from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\":", "return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2} def", "\"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and labels must have the same", "either express or implied. # See the License for the specific language governing", "pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "List[int], labels: List[int]): accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return", "== labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds == labels).mean() f1", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "str, preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics for GLUE", "<NAME>. 
All rights reserved. # # Licensed under the Apache License, Version 2.0", "f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]): return", "= (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1}", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "golden labels Returns: metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and labels", "metric_fn = accuracy if task_name == 'cola': metric_fn = mcc elif task_name in", "in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn = pearson_and_spearman", "The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All rights reserved. #", "file except in compliance with the License. 
# You may obtain a copy", "spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int],", "if len(preds) != len(labels): raise ValueError(\"Predictions and labels must have the same length\")", "av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int], labels: List[int])", "!= len(labels): raise ValueError(\"Predictions and labels must have the same length\") metric_fn =", "task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name == 'sts-b': metric_fn =", "import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\":", "f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds", "labels Returns: metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and labels must", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "List from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ =", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "{\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)}", "metrics for GLUE tasks Args: task_name: GLUE task name preds: model predictions labels:", "import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds:", "task_name: GLUE task name preds: model predictions labels: golden labels Returns: metrics \"\"\"", "the License. 
# You may obtain a copy of the License at #", "matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr =", "GLUE tasks Args: task_name: GLUE task name preds: model predictions labels: golden labels", "AI Language Team Authors and # The HuggingFace Inc. team. # Copyright (c)", "f1} def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int],", "to in writing, software # distributed under the License is distributed on an", "(preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds == labels).mean()", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if task_name == 'cola': metric_fn = mcc elif task_name in ['mrpc', 'qqp']: metric_fn", "List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\":", "for GLUE tasks Args: task_name: GLUE task name preds: model predictions labels: golden", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds,", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "and labels must have the same length\") metric_fn = accuracy if task_name ==", "HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All rights reserved. 
# #", "return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds", "= mcc elif task_name in ['mrpc', 'qqp']: metric_fn = acc_and_f1 elif task_name ==", "Team Authors and # The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>.", "applicable law or agreed to in writing, software # distributed under the License", "pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\":", "spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]:", "name preds: model predictions labels: golden labels Returns: metrics \"\"\" if len(preds) !=", "governing permissions and # limitations under the License. from typing import Dict, List", "Authors and # The HuggingFace Inc. team. # Copyright (c) 2020, <NAME>. All", "typing import Dict, List from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score,", "= f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\": f1} def mcc(preds: List[int], labels: List[int]):", "pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int],", "Returns: metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and labels must have", "matthews_corrcoef __all__ = ['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds ==", "labels: golden labels Returns: metrics \"\"\" if len(preds) != len(labels): raise ValueError(\"Predictions and", "or agreed to in writing, software # distributed under the License is distributed", "or implied. 
# See the License for the specific language governing permissions and", "['compute_metrics'] def accuracy(preds: List[int], labels: List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds:", "scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef __all__ = ['compute_metrics'] def", "def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes metrics", "def acc_and_f1(preds: List[int], labels: List[int]): accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels,", "def mcc(preds: List[int], labels: List[int]): return {\"mcc\": matthews_corrcoef(labels, preds)} def pearson_and_spearman(preds: List[int], labels:", "(c) 2020, <NAME>. All rights reserved. # # Licensed under the Apache License,", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "All rights reserved. # # Licensed under the Apache License, Version 2.0 (the", "2} def compute_metrics(task_name: str, preds: List[int], labels: List[int]) -> Dict[str, float]: \"\"\" Computes", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "GLUE task name preds: model predictions labels: golden labels Returns: metrics \"\"\" if", "# Copyright (c) 2020, <NAME>. All rights reserved. # # Licensed under the", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "def pearson_and_spearman(preds: List[int], labels: List[int]): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0]", "\"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str, preds:", "List[int]): return {\"acc\": (preds == labels).mean()} def acc_and_f1(preds: List[int], labels: List[int]): accuracy =", "The Google AI Language Team Authors and # The HuggingFace Inc. team. #", "import Dict, List from scipy.stats import pearsonr, spearmanr from sklearn.metrics import f1_score, matthews_corrcoef", "accuracy = (preds == labels).mean() f1 = f1_score(y_true=labels, y_pred=preds) return {\"acc\": accuracy, \"f1\":", "spearmanr(preds, labels)[0] return {\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) /", "with the License. # You may obtain a copy of the License at", "{\"pearson\": pearson_corr, \"spearmanr\": spearman_corr, \"pear+spear av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name:", "\"pear+spear av\": (pearson_corr + spearman_corr) / 2} def compute_metrics(task_name: str, preds: List[int], labels:", "List[int]) -> Dict[str, float]: \"\"\" Computes metrics for GLUE tasks Args: task_name: GLUE", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "tasks Args: task_name: GLUE task name preds: model predictions labels: golden labels Returns:", "ValueError(\"Predictions and labels must have the same length\") metric_fn = accuracy if task_name", "limitations under the License. from typing import Dict, List from scipy.stats import pearsonr,", "in writing, software # distributed under the License is distributed on an \"AS", "and # limitations under the License. 
from typing import Dict, List from scipy.stats", "= accuracy if task_name == 'cola': metric_fn = mcc elif task_name in ['mrpc',", "have the same length\") metric_fn = accuracy if task_name == 'cola': metric_fn =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "task name preds: model predictions labels: golden labels Returns: metrics \"\"\" if len(preds)" ]
[ "a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id)", "populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23,", "class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct", "= self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a", "drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id)", "self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis", "list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\" article", "CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated", "BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data)", "\"\"\"Suites for testing the populated database.\"\"\" from pybel import BELGraph from pybel.constants import", "self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = 
list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION])", "= drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph =", "drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges())", "def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def", "test_article(self): \"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions)", "self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI", "data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE]) self.assertIn(CITATION_REFERENCE, data[CITATION]) self.assertEqual('10505536', data[CITATION][CITATION_REFERENCE])", "graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0]", "import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin", "self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test 
lookup of an article.\"\"\" article =", "-*- \"\"\"Suites for testing the populated database.\"\"\" from pybel import BELGraph from pybel.constants", "of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def", "DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein", "self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to", "import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the", "def test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction", "article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding", "# -*- coding: utf-8 -*- \"\"\"Suites for testing the populated database.\"\"\" from pybel", "database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4,", "pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests", "from pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants", "the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\"", "= BELGraph() 
drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION,", "self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article)", "article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name)", "BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class", "drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes())", "self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE])", "populated database.\"\"\" from pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED", "test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self):", "-*- coding: utf-8 -*- \"\"\"Suites for testing the populated database.\"\"\" from pybel import", "protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2,", 
"self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data", "number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an", "drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph()", "self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph)", "= self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug", "= drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6,", "graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED,", "CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\"", "correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles())", "an article.\"\"\" article 
= self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self):", "article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test", "= article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin',", "_, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE]) self.assertIn(CITATION_REFERENCE, data[CITATION])", "protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph", "CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is", "graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug", "of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an article.\"\"\"", "dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to a", "\"\"\"Tests the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of", 
"drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _,", "the populated database.\"\"\" from pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE,", "for testing the populated database.\"\"\" from pybel import BELGraph from pybel.constants import CITATION,", "drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE,", "article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug =", "= list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\"", "correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of", "TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number", "a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein =", "tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def test_count(self):", "_, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE]) self.assertIn(CITATION_REFERENCE, 
data[CITATION]) self.assertEqual('10505536',", "the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup", "adding a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein", "pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import", "from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin):", "database.\"\"\" from pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from", "graph.number_of_edges()) _, _, data = list(graph.edges(data=True))[0] self.assertIn(CITATION, data) self.assertIn(CITATION_TYPE, data[CITATION]) self.assertEqual(CITATION_TYPE_PUBMED, data[CITATION][CITATION_TYPE]) self.assertIn(CITATION_REFERENCE,", "utf-8 -*- \"\"\"Suites for testing the populated database.\"\"\" from pybel import BELGraph from", "to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734',", "import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests", "is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs())", "test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction =", "lookup of an article.\"\"\" article = 
self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0, len(dpis))", "\"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis = list(article.drug_protein_interactions) self.assertNotEqual(0,", "self.assertNotEqual(0, len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\" article =", "drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _, _, data =", "testing the populated database.\"\"\" from pybel import BELGraph from pybel.constants import CITATION, CITATION_REFERENCE,", "<reponame>bio2bel/drugbank # -*- coding: utf-8 -*- \"\"\"Suites for testing the populated database.\"\"\" from", "def test_article(self): \"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536') self.assertIsNotNone(article) dpis =", "self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an article.\"\"\" article = self.manager.get_article_by_pmid('10505536')", "len(dpis)) def test_bel(self): \"\"\"Test adding a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536')", "drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test lookup of an article.\"\"\" article", "from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def", "self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0] protein = drug_protein_interaction.protein self.assertEqual('P00734', protein.uniprot_id) drug = drug_protein_interaction.drug 
self.assertEqual('DB00001',", "\"\"\"Tests the correct number of drugs.\"\"\" self.assertEqual(4, self.manager.count_drugs()) self.assertLessEqual(23, self.manager.count_articles()) def test_article(self): \"\"\"Test", "coding: utf-8 -*- \"\"\"Suites for testing the populated database.\"\"\" from pybel import BELGraph", "\"\"\"Test adding a DTI to a graph.\"\"\" article = self.manager.get_article_by_pmid('10505536') drug_protein_interaction = article.drug_protein_interactions.all()[0]", "PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database is populated correctly.\"\"\" def test_count(self): \"\"\"Tests the", "self.assertEqual('DB00001', drug.drugbank_id) self.assertEqual('Lepirudin', drug.name) graph = BELGraph() drug_protein_interaction.add_to_graph(graph) self.assertEqual(2, graph.number_of_nodes()) self.assertEqual(6, graph.number_of_edges()) _,", "CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED from tests.constants import PopulatedTemporaryCacheClassMixin class TestPopulation(PopulatedTemporaryCacheClassMixin): \"\"\"Tests the database" ]
[ "grayscale(0-255) Return: a float image consisted of correlation coefficient of each pixel. \"\"\"", "of same size the greater this coefficient is, the similar this two patches", "same size the greater this coefficient is, the similar this two patches are.", "There are two useful functions: 1. correlationCoef will tell you the coreelation coefficient", "cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img: image,", "size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate", "coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such as a cat, grayscale(0-255) template:", "correlation coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h):", "#1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should", "consisted of correlation coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row", "and implement correlationCoef function on every window comparing it to template. \"\"\" import", "is, the similar this two patches are. 2. matchTemplate will automatically go through", "#2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float)", "1. correlationCoef will tell you the coreelation coefficient of two patches of same", "Return: a float image consisted of correlation coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1]", "Return: Correlation coefficient(float). \"\"\" #1. 
make sure I read the correct patches if(g1.shape!=g2.shape):", "coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such as a cat,", "of correlation coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in", "array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\"", "'img' with a sliding window and implement correlationCoef function on every window comparing", "Correlation coefficient(float). \"\"\" #1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid", "sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in", "with a sliding window and implement correlationCoef function on every window comparing it", "result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w] result[row,col]=correlationCoef(template,t_patch) return result", "patches are. 2. matchTemplate will automatically go through the whole input 'img' with", "should be in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape))", "of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col", "two patches are. 2. matchTemplate will automatically go through the whole input 'img'", "\"\"\" import cv2 import numpy as np from matplotlib import pyplot as plt", "such as a cat, grayscale(0-255) template: your target, such as a cat's paw,", "coefficient(float). \"\"\" #1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch.", "cat's paw, grayscale(0-255) Return: a float image consisted of correlation coefficient of each", "useful functions: 1. 
correlationCoef will tell you the coreelation coefficient of two patches", "import numpy as np from matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\"", "I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same", "read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size')", "matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255)", "coefficient is, the similar this two patches are. 2. matchTemplate will automatically go", "pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w):", "of two patches of same size the greater this coefficient is, the similar", "this coefficient is, the similar this two patches are. 2. matchTemplate will automatically", "two patches of same size the greater this coefficient is, the similar this", "two useful functions: 1. correlationCoef will tell you the coreelation coefficient of two", "coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such as a", "will tell you the coreelation coefficient of two patches of same size the", "make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be", "comparing it to template. \"\"\" import cv2 import numpy as np from matplotlib", "\"\"\" There are two useful functions: 1. correlationCoef will tell you the coreelation", "Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2)", "will automatically go through the whole input 'img' with a sliding window and", "image consisted of correlation coefficient of each pixel. 
\"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for", "matchTemplate will automatically go through the whole input 'img' with a sliding window", "the whole input 'img' with a sliding window and implement correlationCoef function on", "graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2)", "\"\"\" #1. make sure I read the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch", "def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return:", "a cat's paw, grayscale(0-255) Return: a float image consisted of correlation coefficient of", "template. \"\"\" import cv2 import numpy as np from matplotlib import pyplot as", "same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2.", "Parameters: img: image, such as a cat, grayscale(0-255) template: your target, such as", "similar this two patches are. 2. matchTemplate will automatically go through the whole", "this two patches are. 2. matchTemplate will automatically go through the whole input", "every window comparing it to template. \"\"\" import cv2 import numpy as np", "1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2)", "\"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w]", "two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure I read the correct", "paw, grayscale(0-255) Return: a float image consisted of correlation coefficient of each pixel.", "the greater this coefficient is, the similar this two patches are. 2. matchTemplate", "0 #2. 
Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate", "sliding window and implement correlationCoef function on every window comparing it to template.", "target, such as a cat's paw, grayscale(0-255) Return: a float image consisted of", "be in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return", "greater this coefficient is, the similar this two patches are. 2. matchTemplate will", "go through the whole input 'img' with a sliding window and implement correlationCoef", "as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph two,", "np from matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph", "coefficient of two patches of same size the greater this coefficient is, the", "Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return", "cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img:", "Patch should be in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph", "pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph", "of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation", "2. matchTemplate will automatically go through the whole input 'img' with a sliding", "window and implement correlationCoef function on every window comparing it to template. \"\"\"", "size the greater this coefficient is, the similar this two patches are. 
2.", "\"\"\" Parameters: img: image, such as a cat, grayscale(0-255) template: your target, such", "grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure I read the correct patches", "graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure I read the", "Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\"", "such as a cat's paw, grayscale(0-255) Return: a float image consisted of correlation", "correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation", "whole input 'img' with a sliding window and implement correlationCoef function on every", "print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic", "automatically go through the whole input 'img' with a sliding window and implement", "of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel()", "implement correlationCoef function on every window comparing it to template. \"\"\" import cv2", "are. 2. matchTemplate will automatically go through the whole input 'img' with a", "coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for", "functions: 1. correlationCoef will tell you the coreelation coefficient of two patches of", "Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef", "correlationCoef function on every window comparing it to template. \"\"\" import cv2 import", "as a cat's paw, grayscale(0-255) Return: a float image consisted of correlation coefficient", "function on every window comparing it to template. 
\"\"\" import cv2 import numpy", "template: your target, such as a cat's paw, grayscale(0-255) Return: a float image", "window comparing it to template. \"\"\" import cv2 import numpy as np from", "from matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one,", "matchTemplate(img,template): \"\"\" Parameters: img: image, such as a cat, grayscale(0-255) template: your target,", "patches of same size the greater this coefficient is, the similar this two", "graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1)", "array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters:", "image, such as a cat, grayscale(0-255) template: your target, such as a cat's", "cv2 import numpy as np from matplotlib import pyplot as plt def correlationCoef(g1,g2):", "are two useful functions: 1. correlationCoef will tell you the coreelation coefficient of", "return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3.", "a sliding window and implement correlationCoef function on every window comparing it to", "return coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such as a cat, grayscale(0-255)", "std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template):", "as a cat, grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255)", "print('Size of graph 2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel()", "a float image consisted of correlation coefficient of each pixel. 
\"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1]", "win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w] result[row,col]=correlationCoef(template,t_patch)", "g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1.", "Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such as", "in same size') print('Size of graph 1:',(g1.shape)) print('Size of graph 2:',(g2.shape)) return 0", "#3. Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def matchTemplate(img,template): \"\"\" Parameters: img: image, such", "cat, grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255) Return: a", "patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size of graph", "grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure I", "patch. Patch should be in same size') print('Size of graph 1:',(g1.shape)) print('Size of", "through the whole input 'img' with a sliding window and implement correlationCoef function", "g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure I read", "correlationCoef will tell you the coreelation coefficient of two patches of same size", "one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. make sure", "it to template. \"\"\" import cv2 import numpy as np from matplotlib import", "on every window comparing it to template. \"\"\" import cv2 import numpy as", "the coreelation coefficient of two patches of same size the greater this coefficient", "graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float). \"\"\" #1. 
make", "numpy as np from matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters:", "import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2:", "the correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size", "def matchTemplate(img,template): \"\"\" Parameters: img: image, such as a cat, grayscale(0-255) template: your", "input 'img' with a sliding window and implement correlationCoef function on every window", "import cv2 import numpy as np from matplotlib import pyplot as plt def", "if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size of graph 1:',(g1.shape))", "each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in", "\"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255) Return: Correlation coefficient(float).", "the similar this two patches are. 2. matchTemplate will automatically go through the", "you the coreelation coefficient of two patches of same size the greater this", "correct patches if(g1.shape!=g2.shape): print('Invalid patch. Patch should be in same size') print('Size of", "float image consisted of correlation coefficient of each pixel. \"\"\" win_w,win_h=template.shape[::-1] w,h=img.shape[::-1] result=np.zeros(img.shape)", "tell you the coreelation coefficient of two patches of same size the greater", "std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0] #3. 
Calculate coefficient(float) coef=cov/(std_g1*std_g2) return coef def", "coreelation coefficient of two patches of same size the greater this coefficient is,", "img: image, such as a cat, grayscale(0-255) template: your target, such as a", "plt def correlationCoef(g1,g2): \"\"\" Parameters: g1: graph one, grayscale(0-255) g2: graph two, grayscale(0-255)", "as np from matplotlib import pyplot as plt def correlationCoef(g1,g2): \"\"\" Parameters: g1:", "print('Invalid patch. Patch should be in same size') print('Size of graph 1:',(g1.shape)) print('Size", "grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255) Return: a float", "2:',(g2.shape)) return 0 #2. Calculate Statistic Infomation std_g1=np.std(g1) std_g2=np.std(g2) array1=g1.ravel() array2=g2.ravel() cov_matrix=np.cov(array1,array2) cov=cov_matrix[1,0]", "w,h=img.shape[::-1] result=np.zeros(img.shape) for row in range(h-win_h): for col in range(w-win_w): t_patch=img[row:row+win_h,col:col+win_w] result[row,col]=correlationCoef(template,t_patch) return", "your target, such as a cat's paw, grayscale(0-255) Return: a float image consisted", "a cat, grayscale(0-255) template: your target, such as a cat's paw, grayscale(0-255) Return:", "to template. \"\"\" import cv2 import numpy as np from matplotlib import pyplot" ]
[ "efficiently mentions of Wikidata items in text. \"\"\" def __init__(self, solr_collection, bow, graph):", "items in text. \"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger", "\"in\" to match with initials \"OF\", \"IN\", as well as sport scores, postcodes,", "phrase, mention, docs, mentions): \"\"\" Adds more info to the mentions returned from", "a list of [key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]: lst[2*k+1] for", "self.bow = bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$')", "words language model, adequately trained, which will be used to evaluate the likelihood", "phrase): \"\"\" Should this phrase be pruned? It happens when it is shorter", "docs, mentions): \"\"\" Adds more info to the mentions returned from Solr, to", "enhance with scores :param docs: dictionary from qid to item :param mentions: the", "Creates a tagger from: - a solr collection name, which has been adequately", "start = mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags", "re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text,", "pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase be pruned? It happens when", "import requests import logging import re from math import log from .languagemodel import", "mention, docs, mentions): \"\"\" Adds more info to the mentions returned from Solr,", "\"IN\", as well as sport scores, postcodes, and so on. 
\"\"\" return self.prune_re.match(phrase)", "prune=True): \"\"\" Given some text, use the solr index to retrieve candidate items", "phrase, prune=True): \"\"\" Given some text, use the solr index to retrieve candidate", "for mention in resp.get('tags', []) ] docs = { doc['id']:doc for doc in", "in mention.get('tags', []): if 'edges' in tag: del tag['edges'] if 'aliases' in tag:", "page rank and edge similarity mentions_json = [ self._dictify(mention) for mention in resp.get('tags',", "import json import requests import logging import re from math import log from", "Wikidata items about characters, or to prevent short words such as \"of\" or", "Enhance mentions with page rank and edge similarity mentions_json = [ self._dictify(mention) for", "self.prune_re.match(phrase) is not None and phrase.lower() == phrase def _create_mention(self, phrase, mention, docs,", "matches of Wikidata items about characters, or to prevent short words such as", "introduced to remove matches of Wikidata items about characters, or to prevent short", "mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid in", "text. :param prune: if True, ignores lowercase mentions shorter than 3 characters \"\"\"", "of [key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]: lst[2*k+1] for k in", "by the classifier. :param phrase: the original document :param mention: the JSON mention", "tagger.tag_and_rank(phrase) for mention in tags: for tag in mention.get('tags', []): if 'edges' in", "the solr index to retrieve candidate items mentioned in the text. 
:param prune:", "= tagger.tag_and_rank(phrase) for mention in tags: for tag in mention.get('tags', []): if 'edges'", "logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json',", "tagger from: - a solr collection name, which has been adequately initialized with", "compute the page rank and the edges between items \"\"\" self.bow = bow", "original document :param mention: the JSON mention to enhance with scores :param docs:", "JSON mention to enhance with scores :param docs: dictionary from qid to item", "ranking by the classifier. :param phrase: the original document :param mention: the JSON", "mentions_json = [ self._dictify(mention) for mention in resp.get('tags', []) ] docs = {", "resp.get('tags', []) ] docs = { doc['id']:doc for doc in resp.get('response', {}).get('docs', [])", "dict \"\"\" return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if __name__", "to evaluate the likelihood of phrases - a wikidata graph, adequately loaded, which", "graph): \"\"\" Creates a tagger from: - a solr collection name, which has", "shorter than 3 characters and appears in lowercase in the text, or only", "math import log from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag", "the enhanced mention, as a Mention object \"\"\" start = mention['startOffset'] end =", "import Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The", "not None and phrase.lower() == phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\"", "of Wikidata items about characters, or to prevent short words such as \"of\"", "is not None and phrase.lower() == phrase def _create_mention(self, phrase, mention, docs, mentions):", "= [ self._create_mention(phrase, mention, docs, mentions_json) for 
mention in mentions_json ] pruned_mentions =", "edges between items \"\"\" self.bow = bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection)", "with documents - a bag of words language model, adequately trained, which will", ") def _dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] to a dict", "r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions with page rank and", "if 'edges' in tag: del tag['edges'] if 'aliases' in tag: del tag['aliases'] print(json.dumps(tags,", "rank and the edges between items \"\"\" self.bow = bow self.graph = graph", "ignores lowercase mentions shorter than 3 characters \"\"\" # Tag phrase = phrase[:self.max_length]", "phrase: the original document :param mention: the JSON mention to enhance with scores", "prepare them for ranking by the classifier. :param phrase: the original document :param", "tagger = Tagger(bow, graph) while True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase)", "to match with initials \"OF\", \"IN\", as well as sport scores, postcodes, and", "logging import re from math import log from .languagemodel import BOWLanguageModel from .wikidatagraph", "lowercase mentions shorter than 3 characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging", "mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. 
+ log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface,", "with a compatible index and filled with documents - a bag of words", "a Wikidata dump in Solr and uses it to detect efficiently mentions of", "solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a", "self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] =", "log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], )", "self._create_mention(phrase, mention, docs, mentions_json) for mention in mentions_json ] pruned_mentions = [ mention", "well as sport scores, postcodes, and so on. \"\"\" return self.prune_re.match(phrase) is not", "text, or only consists of digits. This is mostly introduced to remove matches", "docs, mentions_json) for mention in mentions_json ] pruned_mentions = [ mention for mention", "BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import Tag from .mention import Mention", "end = mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for", "'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\"", "to item :param mentions: the list of all mentions in the document :returns:", "a bag of words language model, adequately trained, which will be used to", "Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint,", "returned from Solr, to prepare them for ranking by the classifier. :param phrase:", "to detect efficiently mentions of Wikidata items in text. 
\"\"\" def __init__(self, solr_collection,", "the classifier. :param phrase: the original document :param mention: the JSON mention to", "consists of digits. This is mostly introduced to remove matches of Wikidata items", "\"\"\" start = mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface)", "'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance", "to retrieve candidate items mentioned in the text. :param prune: if True, ignores", "mentions in the document :returns: the enhanced mention, as a Mention object \"\"\"", "compatible index and filled with documents - a bag of words language model,", "WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase = input('>>> ') tags", "mention in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\"", "= 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda", "import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import Tag from .mention import", "mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for mention in mentions_json ] pruned_mentions", "item :param mentions: the list of all mentions in the document :returns: the", "in range(len(lst)//2) } if __name__ == '__main__': import sys fname = sys.argv[1] print('Loading", "Solr and uses it to detect efficiently mentions of Wikidata items in text.", "for ranking by the classifier. 
:param phrase: the original document :param mention: the", "lst[2*k+1] for k in range(len(lst)//2) } if __name__ == '__main__': import sys fname", "bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length =", "of all mentions in the document :returns: the enhanced mention, as a Mention", "k in range(len(lst)//2) } if __name__ == '__main__': import sys fname = sys.argv[1]", "a compatible index and filled with documents - a bag of words language", "graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self,", "adequately trained, which will be used to evaluate the likelihood of phrases -", "'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() #", "logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions with page rank and edge", "a Mention object \"\"\" start = mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end]", "mention, as a Mention object \"\"\" start = mention['startOffset'] end = mention['endOffset'] surface", "tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] to", "from .mention import Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object):", "has been adequately initialized with a compatible index and filled with documents -", "the text, or only consists of digits. 
This is mostly introduced to remove", "3 characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length", "import sys fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2])", "more info to the mentions returned from Solr, to prepare them for ranking", "in mentions_json ] pruned_mentions = [ mention for mention in mentions if not", "so on. \"\"\" return self.prune_re.match(phrase) is not None and phrase.lower() == phrase def", "fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph =", "= [ mention for mention in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions", "in tag: del tag['edges'] if 'aliases' in tag: del tag['aliases'] print(json.dumps(tags, indent=2, sort_keys=True))", "log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a list", "] docs = { doc['id']:doc for doc in resp.get('response', {}).get('docs', []) } mentions", "tags: for tag in mention.get('tags', []): if 'edges' in tag: del tag['edges'] if", "wikidata graph, adequately loaded, which will be used to compute the page rank", "to enhance with scores :param docs: dictionary from qid to item :param mentions:", "digits. 
This is mostly introduced to remove matches of Wikidata items about characters,", "={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions with page", "between items \"\"\" self.bow = bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re", "\"\"\" Given some text, use the solr index to retrieve candidate items mentioned", "'+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase =", "language model, adequately trained, which will be used to evaluate the likelihood of", "[key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2)", "return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase be pruned? It happens", "= dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end,", "Solr, to prepare them for ranking by the classifier. 
:param phrase: the original", "+ log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10],", "been adequately initialized with a compatible index and filled with documents - a", "{})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8'))", "__name__ == '__main__': import sys fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel()", "mentions_json) for mention in mentions_json ] pruned_mentions = [ mention for mention in", "] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase be pruned? It", "for tag in mention.get('tags', []): if 'edges' in tag: del tag['edges'] if 'aliases'", "= graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def", "will be used to evaluate the likelihood of phrases - a wikidata graph,", "to remove matches of Wikidata items about characters, or to prevent short words", "solr_collection, bow, graph): \"\"\" Creates a tagger from: - a solr collection name,", "dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score,", "model, adequately trained, which will be used to evaluate the likelihood of phrases", "= r.json() # Enhance mentions with page rank and edge similarity mentions_json =", "in the text, or only consists of digits. 
This is mostly introduced to", "from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import Tag from", "\"of\" or \"in\" to match with initials \"OF\", \"IN\", as well as sport", "a dict \"\"\" return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if", "'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp =", "from .tag import Tag from .mention import Mention # solr_collection = 'wd_multilingual' logger", "happens when it is shorter than 3 characters and appears in lowercase in", "_dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] to a dict \"\"\" return", ":param prune: if True, ignores lowercase mentions shorter than 3 characters \"\"\" #", "r.json() # Enhance mentions with page rank and edge similarity mentions_json = [", "doc in resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase, mention, docs, mentions_json)", "params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp", "item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags,", "and uses it to detect efficiently mentions of Wikidata items in text. 
\"\"\"", "} if __name__ == '__main__': import sys fname = sys.argv[1] print('Loading '+fname) bow", "as \"of\" or \"in\" to match with initials \"OF\", \"IN\", as well as", "phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types',", "_create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more info to the mentions returned", "\"\"\" Converts a list of [key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]:", "this phrase be pruned? It happens when it is shorter than 3 characters", "info to the mentions returned from Solr, to prepare them for ranking by", "from qid to item :param mentions: the list of all mentions in the", "def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more info to the mentions", "mention in mentions_json ] pruned_mentions = [ mention for mention in mentions if", "which will be used to compute the page rank and the edges between", "from: - a solr collection name, which has been adequately initialized with a", "rank and edge similarity mentions_json = [ self._dictify(mention) for mention in resp.get('tags', [])", "Tag from .mention import Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class", "classifier. 
:param phrase: the original document :param mention: the JSON mention to enhance", "(length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'},", "surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']: item = dict(docs[qid].items())", "evaluate the likelihood of phrases - a wikidata graph, adequately loaded, which will", "short words such as \"of\" or \"in\" to match with initials \"OF\", \"IN\",", "lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] to a dict \"\"\" return {", "= 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use the solr", "start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts", "initialized with a compatible index and filled with documents - a bag of", "to the mentions returned from Solr, to prepare them for ranking by the", "requests import logging import re from math import log from .languagemodel import BOWLanguageModel", "import logging import re from math import log from .languagemodel import BOWLanguageModel from", "characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase)))", "mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = []", "bow, graph): \"\"\" Creates a tagger from: - a solr collection name, which", "[]): if 'edges' in tag: del tag['edges'] if 'aliases' in tag: del tag['aliases']", "data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions with page rank", "resp = r.json() # Enhance mentions with page rank and edge similarity 
mentions_json", "if True, ignores lowercase mentions shorter than 3 characters \"\"\" # Tag phrase", "prune_phrase(self, phrase): \"\"\" Should this phrase be pruned? It happens when it is", "and phrase.lower() == phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more", "for mention in mentions_json ] pruned_mentions = [ mention for mention in mentions", "ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def", "tags = tagger.tag_and_rank(phrase) for mention in tags: for tag in mention.get('tags', []): if", "to compute the page rank and the edges between items \"\"\" self.bow =", "while True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in tags:", "phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more info to the", "all mentions in the document :returns: the enhanced mention, as a Mention object", ":returns: the enhanced mention, as a Mention object \"\"\" start = mention['startOffset'] end", "Wikidata items in text. 
\"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates a", "mention to enhance with scores :param docs: dictionary from qid to item :param", "= input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in tags: for tag in", "self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase be pruned?", "'+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger =", "- a wikidata graph, adequately loaded, which will be used to compute the", "= requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging", "graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase = input('>>> ') tags =", "Tagger(bow, graph) while True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for mention", "to prevent short words such as \"of\" or \"in\" to match with initials", "adequately initialized with a compatible index and filled with documents - a bag", "}, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions", "in the document :returns: the enhanced mention, as a Mention object \"\"\" start", "and filled with documents - a bag of words language model, adequately trained,", "sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2])", "docs: dictionary from qid to item :param mentions: the list of all mentions", "\"\"\" Adds more info to the mentions returned from Solr, to prepare them", "tag in mention.get('tags', []): if 'edges' in tag: del 
tag['edges'] if 'aliases' in", "This is mostly introduced to remove matches of Wikidata items about characters, or", "in resp.get('tags', []) ] docs = { doc['id']:doc for doc in resp.get('response', {}).get('docs',", "in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should", "in tags: for tag in mention.get('tags', []): if 'edges' in tag: del tag['edges']", "[] for qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid))", "{}).get('docs', []) } mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for mention in", "Converts a list of [key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]: lst[2*k+1]", "'__main__': import sys fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading", "will be used to compute the page rank and the edges between items", "be used to evaluate the likelihood of phrases - a wikidata graph, adequately", ":param mention: the JSON mention to enhance with scores :param docs: dictionary from", "such as \"of\" or \"in\" to match with initials \"OF\", \"IN\", as well", "if __name__ == '__main__': import sys fname = sys.argv[1] print('Loading '+fname) bow =", "in text. \"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger from:", "and so on. \"\"\" return self.prune_re.match(phrase) is not None and phrase.lower() == phrase", "is shorter than 3 characters and appears in lowercase in the text, or", ".languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import Tag from .mention", "solr index to retrieve candidate items mentioned in the text. :param prune: if", "use the solr index to retrieve candidate items mentioned in the text. 
:param", "for mention in tags: for tag in mention.get('tags', []): if 'edges' in tag:", "resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for mention", "characters, or to prevent short words such as \"of\" or \"in\" to match", "<reponame>heathersherry/opentapioca<filename>opentapioca/tagger.py import json import requests import logging import re from math import log", "if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase", "items about characters, or to prevent short words such as \"of\" or \"in\"", "and appears in lowercase in the text, or only consists of digits. This", "of words language model, adequately trained, which will be used to evaluate the", "text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off',", "# Enhance mentions with page rank and edge similarity mentions_json = [ self._dictify(mention)", "= re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some", "3 characters and appears in lowercase in the text, or only consists of", "{ lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if __name__ == '__main__': import", "= mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid", "return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self,", "than 3 characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr", "\"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger from: - a", "loaded, which will be used to compute the page rank and the edges", 
"for k in range(len(lst)//2) } if __name__ == '__main__': import sys fname =", "phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB',", "r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status()", "[ self._create_mention(phrase, mention, docs, mentions_json) for mention in mentions_json ] pruned_mentions = [", "the page rank and the edges between items \"\"\" self.bow = bow self.graph", "enhanced mention, as a Mention object \"\"\" start = mention['startOffset'] end = mention['endOffset']", "or only consists of digits. This is mostly introduced to remove matches of", "\"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r", "') tags = tagger.tag_and_rank(phrase) for mention in tags: for tag in mention.get('tags', []):", "index and filled with documents - a bag of words language model, adequately", "mention, docs, mentions_json) for mention in mentions_json ] pruned_mentions = [ mention for", "\"\"\" return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if __name__ ==", "Given some text, use the solr index to retrieve candidate items mentioned in", "as well as sport scores, postcodes, and so on. 
\"\"\" return self.prune_re.match(phrase) is", "= phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']: item", ".tag import Tag from .mention import Mention # solr_collection = 'wd_multilingual' logger =", "Mention object \"\"\" start = mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score", "bag of words language model, adequately trained, which will be used to evaluate", "it is shorter than 3 characters and appears in lowercase in the text,", "which will be used to evaluate the likelihood of phrases - a wikidata", "self._dictify(mention) for mention in resp.get('tags', []) ] docs = { doc['id']:doc for doc", "end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a", "edge similarity mentions_json = [ self._dictify(mention) for mention in resp.get('tags', []) ] docs", "range(len(lst)//2) } if __name__ == '__main__': import sys fname = sys.argv[1] print('Loading '+fname)", "tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use the solr index to retrieve", "of digits. 
This is mostly introduced to remove matches of Wikidata items about", "= mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags =", "object \"\"\" start = mention['startOffset'] end = mention['endOffset'] surface = phrase[start:end] surface_score =", "Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger", "mention in tags: for tag in mention.get('tags', []): if 'edges' in tag: del", "graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase = input('>>>", "Adds more info to the mentions returned from Solr, to prepare them for", "re from math import log from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph", "used to evaluate the likelihood of phrases - a wikidata graph, adequately loaded,", "remove matches of Wikidata items about characters, or to prevent short words such", "== '__main__': import sys fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname)", "logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a Wikidata dump in Solr and", "key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...]", "in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention(", "print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger", "WikidataGraph from .tag import Tag from .mention import Mention # solr_collection = 'wd_multilingual'", "uses it to detect efficiently mentions of Wikidata items in text. \"\"\" def", "in lowercase in the text, or only consists of digits. This is mostly", "as sport scores, postcodes, and so on. 
\"\"\" return self.prune_re.match(phrase) is not None", "phrase.lower() == phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more info", "mentions of Wikidata items in text. \"\"\" def __init__(self, solr_collection, bow, graph): \"\"\"", "than 3 characters and appears in lowercase in the text, or only consists", "when it is shorter than 3 characters and appears in lowercase in the", "list of [key1,val1,key2,val2,...] to a dict \"\"\" return { lst[2*k]: lst[2*k+1] for k", "- a solr collection name, which has been adequately initialized with a compatible", "of Wikidata items in text. \"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates", "and the edges between items \"\"\" self.bow = bow self.graph = graph self.solr_endpoint", "\"\"\" Should this phrase be pruned? It happens when it is shorter than", "= WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase = input('>>> ')", "or to prevent short words such as \"of\" or \"in\" to match with", "the document :returns: the enhanced mention, as a Mention object \"\"\" start =", "in resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for", "a wikidata graph, adequately loaded, which will be used to compute the page", "return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if __name__ == '__main__':", "to a dict \"\"\" return { lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) }", "mention for mention in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self,", "in the text. 
:param prune: if True, ignores lowercase mentions shorter than 3", "with initials \"OF\", \"IN\", as well as sport scores, postcodes, and so on.", "import re from math import log from .languagemodel import BOWLanguageModel from .wikidatagraph import", "It happens when it is shorter than 3 characters and appears in lowercase", "Tagger(object): \"\"\" The tagger indexes a Wikidata dump in Solr and uses it", "'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a Wikidata dump", "on. \"\"\" return self.prune_re.match(phrase) is not None and phrase.lower() == phrase def _create_mention(self,", "self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given", "the text. :param prune: if True, ignores lowercase mentions shorter than 3 characters", "The tagger indexes a Wikidata dump in Solr and uses it to detect", "prevent short words such as \"of\" or \"in\" to match with initials \"OF\",", "sport scores, postcodes, and so on. \"\"\" return self.prune_re.match(phrase) is not None and", "Wikidata dump in Solr and uses it to detect efficiently mentions of Wikidata", "text. \"\"\" def __init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger from: -", "] pruned_mentions = [ mention for mention in mentions if not self.prune_phrase(mention.phrase) ]", "Should this phrase be pruned? It happens when it is shorter than 3", "phrase be pruned? It happens when it is shorter than 3 characters and", "for doc in resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase, mention, docs,", "mention: the JSON mention to enhance with scores :param docs: dictionary from qid", "docs = { doc['id']:doc for doc in resp.get('response', {}).get('docs', []) } mentions =", "qid to item :param mentions: the list of all mentions in the document", "retrieve candidate items mentioned in the text. 
:param prune: if True, ignores lowercase", "scores :param docs: dictionary from qid to item :param mentions: the list of", "bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow,", "adequately loaded, which will be used to compute the page rank and the", "[]) } mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for mention in mentions_json", "\"\"\" self.bow = bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d", "BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while", "ranked_tags = [] for qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23.", "similarity mentions_json = [ self._dictify(mention) for mention in resp.get('tags', []) ] docs =", "candidate items mentioned in the text. :param prune: if True, ignores lowercase mentions", "# solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes", "in Solr and uses it to detect efficiently mentions of Wikidata items in", "graph, adequately loaded, which will be used to compute the page rank and", "self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use the", "mostly introduced to remove matches of Wikidata items about characters, or to prevent", "mentioned in the text. :param prune: if True, ignores lowercase mentions shorter than", "match with initials \"OF\", \"IN\", as well as sport scores, postcodes, and so", "about characters, or to prevent short words such as \"of\" or \"in\" to", "appears in lowercase in the text, or only consists of digits. 
This is", "} mentions = [ self._create_mention(phrase, mention, docs, mentions_json) for mention in mentions_json ]", "documents - a bag of words language model, adequately trained, which will be", "None and phrase.lower() == phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds", "= BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph)", ".wikidatagraph import WikidataGraph from .tag import Tag from .mention import Mention # solr_collection", "mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this", "sys fname = sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph", "[]) ] docs = { doc['id']:doc for doc in resp.get('response', {}).get('docs', []) }", "tagger indexes a Wikidata dump in Solr and uses it to detect efficiently", "phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\"", "'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json()", "document :param mention: the JSON mention to enhance with scores :param docs: dictionary", "initials \"OF\", \"IN\", as well as sport scores, postcodes, and so on. 
\"\"\"", "the edges between items \"\"\" self.bow = bow self.graph = graph self.solr_endpoint =", "doc['id']:doc for doc in resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase, mention,", "be used to compute the page rank and the edges between items \"\"\"", "[ mention for mention in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def", "self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000", "log from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import Tag", "a solr collection name, which has been adequately initialized with a compatible index", "with page rank and edge similarity mentions_json = [ self._dictify(mention) for mention in", "= { doc['id']:doc for doc in resp.get('response', {}).get('docs', []) } mentions = [", "return self.prune_re.match(phrase) is not None and phrase.lower() == phrase def _create_mention(self, phrase, mention,", "= sys.argv[1] print('Loading '+fname) bow = BOWLanguageModel() bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph()", "with scores :param docs: dictionary from qid to item :param mentions: the list", "text, use the solr index to retrieve candidate items mentioned in the text.", "phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']: item =", "words such as \"of\" or \"in\" to match with initials \"OF\", \"IN\", as", "index to retrieve candidate items mentioned in the text. :param prune: if True,", "\"OF\", \"IN\", as well as sport scores, postcodes, and so on. \"\"\" return", "the original document :param mention: the JSON mention to enhance with scores :param", "detect efficiently mentions of Wikidata items in text. 
\"\"\" def __init__(self, solr_collection, bow,", "document :returns: the enhanced mention, as a Mention object \"\"\" start = mention['startOffset']", "with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', },", ".mention import Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\"", "[ self._dictify(mention) for mention in resp.get('tags', []) ] docs = { doc['id']:doc for", "of phrases - a wikidata graph, adequately loaded, which will be used to", "requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded')", "the list of all mentions in the document :returns: the enhanced mention, as", "as a Mention object \"\"\" start = mention['startOffset'] end = mention['endOffset'] surface =", "for qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. 
+ log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item))", "pruned_mentions = [ mention for mention in mentions if not self.prune_phrase(mention.phrase) ] return", "= phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500,", "likelihood of phrases - a wikidata graph, adequately loaded, which will be used", "mention in resp.get('tags', []) ] docs = { doc['id']:doc for doc in resp.get('response',", "mentions: the list of all mentions in the document :returns: the enhanced mention,", "__init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger from: - a solr collection", "# Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with solr (length {})'.format(len(phrase))) r =", "list of all mentions in the document :returns: the enhanced mention, as a", "qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return", "== phrase def _create_mention(self, phrase, mention, docs, mentions): \"\"\" Adds more info to", "the JSON mention to enhance with scores :param docs: dictionary from qid to", "import log from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from .tag import", "filled with documents - a bag of words language model, adequately trained, which", "= 'wd_multilingual' logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a Wikidata", "= logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a Wikidata dump in Solr", "phrases - a wikidata graph, adequately loaded, which will be used to compute", "solr (length {})'.format(len(phrase))) r = requests.post(self.solr_endpoint, params={'overlaps':'NO_SUB', 'tagsLimit':500, 'fl':'id,label,aliases,extra_aliases,desc,nb_statements,nb_sitelinks,edges,types', 'wt':'json', 'indent':'off', }, headers", "not 
self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase): \"\"\" Should this phrase be", "phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in tags: for tag", "import Tag from .mention import Mention # solr_collection = 'wd_multilingual' logger = logging.getLogger(__name__)", "True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in tags: for", "the mentions returned from Solr, to prepare them for ranking by the classifier.", "mentions with page rank and edge similarity mentions_json = [ self._dictify(mention) for mention", "it to detect efficiently mentions of Wikidata items in text. \"\"\" def __init__(self,", "-tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] to a", "Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst):", "mentions_json ] pruned_mentions = [ mention for mention in mentions if not self.prune_phrase(mention.phrase)", "scores, postcodes, and so on. \"\"\" return self.prune_re.match(phrase) is not None and phrase.lower()", "\"\"\" return self.prune_re.match(phrase) is not None and phrase.lower() == phrase def _create_mention(self, phrase,", "import WikidataGraph from .tag import Tag from .mention import Mention # solr_collection =", "some text, use the solr index to retrieve candidate items mentioned in the", "which has been adequately initialized with a compatible index and filled with documents", "def __init__(self, solr_collection, bow, graph): \"\"\" Creates a tagger from: - a solr", "mentions): \"\"\" Adds more info to the mentions returned from Solr, to prepare", "only consists of digits. 
This is mostly introduced to remove matches of Wikidata", "self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase,", "prune: if True, ignores lowercase mentions shorter than 3 characters \"\"\" # Tag", "= bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length", "succeeded') resp = r.json() # Enhance mentions with page rank and edge similarity", "= 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re = re.compile(r'^(\\w\\w?|[\\d ]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True):", "\"\"\" Creates a tagger from: - a solr collection name, which has been", "input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in tags: for tag in mention.get('tags',", "items \"\"\" self.bow = bow self.graph = graph self.solr_endpoint = 'http://localhost:8983/solr/{}/tag'.format(solr_collection) self.prune_re =", "be pruned? It happens when it is shorter than 3 characters and appears", "'edges' in tag: del tag['edges'] if 'aliases' in tag: del tag['aliases'] print(json.dumps(tags, indent=2,", "def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use the solr index to", "True, ignores lowercase mentions shorter than 3 characters \"\"\" # Tag phrase =", ":param mentions: the list of all mentions in the document :returns: the enhanced", ":param phrase: the original document :param mention: the JSON mention to enhance with", "23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start, end=end, log_likelihood=-surface_score, tags=sorted(ranked_tags, key=lambda tag:", "def _dictify(self, lst): \"\"\" Converts a list of [key1,val1,key2,val2,...] 
to a dict \"\"\"", "10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use the solr index", "= Tagger(bow, graph) while True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for", "from math import log from .languagemodel import BOWLanguageModel from .wikidatagraph import WikidataGraph from", "graph) while True: phrase = input('>>> ') tags = tagger.tag_and_rank(phrase) for mention in", "headers ={'Content-Type':'text/plain'}, data=phrase.encode('utf-8')) r.raise_for_status() logger.debug('Tagging succeeded') resp = r.json() # Enhance mentions with", "characters and appears in lowercase in the text, or only consists of digits.", "= self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']: item = dict(docs[qid].items()) item['rank']", "\"\"\" The tagger indexes a Wikidata dump in Solr and uses it to", "dictionary from qid to item :param mentions: the list of all mentions in", "or \"in\" to match with initials \"OF\", \"IN\", as well as sport scores,", "]{,4})$') self.max_length = 10000 def tag_and_rank(self, phrase, prune=True): \"\"\" Given some text, use", "mentions returned from Solr, to prepare them for ranking by the classifier. :param", ":param docs: dictionary from qid to item :param mentions: the list of all", "pruned? It happens when it is shorter than 3 characters and appears in", "class Tagger(object): \"\"\" The tagger indexes a Wikidata dump in Solr and uses", "lst[2*k]: lst[2*k+1] for k in range(len(lst)//2) } if __name__ == '__main__': import sys", "and edge similarity mentions_json = [ self._dictify(mention) for mention in resp.get('tags', []) ]", "from Solr, to prepare them for ranking by the classifier. :param phrase: the", "items mentioned in the text. :param prune: if True, ignores lowercase mentions shorter", "- a bag of words language model, adequately trained, which will be used", "def prune_phrase(self, phrase): \"\"\" Should this phrase be pruned? 
It happens when it", "json import requests import logging import re from math import log from .languagemodel", "from .wikidatagraph import WikidataGraph from .tag import Tag from .mention import Mention #", "dump in Solr and uses it to detect efficiently mentions of Wikidata items", "shorter than 3 characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text with", "them for ranking by the classifier. :param phrase: the original document :param mention:", "= [] for qid in mention['ids']: item = dict(docs[qid].items()) item['rank'] = 23. +", "{ doc['id']:doc for doc in resp.get('response', {}).get('docs', []) } mentions = [ self._create_mention(phrase,", "page rank and the edges between items \"\"\" self.bow = bow self.graph =", "surface = phrase[start:end] surface_score = self.bow.log_likelihood(surface) ranked_tags = [] for qid in mention['ids']:", "print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True: phrase", "tags=sorted(ranked_tags, key=lambda tag: -tag.rank)[:10], ) def _dictify(self, lst): \"\"\" Converts a list of", "for mention in mentions if not self.prune_phrase(mention.phrase) ] return pruned_mentions def prune_phrase(self, phrase):", "name, which has been adequately initialized with a compatible index and filled with", "indexes a Wikidata dump in Solr and uses it to detect efficiently mentions", "mention.get('tags', []): if 'edges' in tag: del tag['edges'] if 'aliases' in tag: del", "postcodes, and so on. 
\"\"\" return self.prune_re.match(phrase) is not None and phrase.lower() ==", "bow.load(fname) print('Loading '+sys.argv[2]) graph = WikidataGraph() graph.load_pagerank(sys.argv[2]) tagger = Tagger(bow, graph) while True:", "collection name, which has been adequately initialized with a compatible index and filled", "a tagger from: - a solr collection name, which has been adequately initialized", "= [ self._dictify(mention) for mention in resp.get('tags', []) ] docs = { doc['id']:doc", "trained, which will be used to evaluate the likelihood of phrases - a", "lowercase in the text, or only consists of digits. This is mostly introduced", "the likelihood of phrases - a wikidata graph, adequately loaded, which will be", "to prepare them for ranking by the classifier. :param phrase: the original document", "solr collection name, which has been adequately initialized with a compatible index and", "logger = logging.getLogger(__name__) class Tagger(object): \"\"\" The tagger indexes a Wikidata dump in", "used to compute the page rank and the edges between items \"\"\" self.bow", "mentions shorter than 3 characters \"\"\" # Tag phrase = phrase[:self.max_length] logger.debug('Tagging text", "is mostly introduced to remove matches of Wikidata items about characters, or to", "item = dict(docs[qid].items()) item['rank'] = 23. + log(self.graph.get_pagerank(qid)) ranked_tags.append(Tag(**item)) return Mention( phrase=surface, start=start," ]
[ "\"\"\" def get_bigest_public_num(m, n): if n == 0: return m else: return get_bigest_public_num(n,", "== 0: return m else: return get_bigest_public_num(n, m % n) if __name__ ==", "n == 0: return m else: return get_bigest_public_num(n, m % n) if __name__", "问题描述:给定两个不等于0的整数M和N,求M和N的最大公约数. \"\"\" def get_bigest_public_num(m, n): if n == 0: return m else: return", "0: return m else: return get_bigest_public_num(n, m % n) if __name__ == '__main__':", "return m else: return get_bigest_public_num(n, m % n) if __name__ == '__main__': print(get_bigest_public_num(10,", "n): if n == 0: return m else: return get_bigest_public_num(n, m % n)", "\"\"\" 问题描述:给定两个不等于0的整数M和N,求M和N的最大公约数. \"\"\" def get_bigest_public_num(m, n): if n == 0: return m else:", "if n == 0: return m else: return get_bigest_public_num(n, m % n) if", "<filename>other/q2.py \"\"\" 问题描述:给定两个不等于0的整数M和N,求M和N的最大公约数. \"\"\" def get_bigest_public_num(m, n): if n == 0: return m", "get_bigest_public_num(m, n): if n == 0: return m else: return get_bigest_public_num(n, m %", "m else: return get_bigest_public_num(n, m % n) if __name__ == '__main__': print(get_bigest_public_num(10, 23))", "def get_bigest_public_num(m, n): if n == 0: return m else: return get_bigest_public_num(n, m" ]
[ "metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model, metric, criterion) #", "print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1)", "arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\",", "'cpu' # Load dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names =", "Set loss function and optimization function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay =", "def objective(trial): # Parse arguments. args = parse_args() # Set device. device =", "Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path',", "loss.backward() optimizer.step() scheduler.step() # Set data to calculate score. output_list += [int(o.argmax()) for", "args = arg_parser.parse_args() # Make directory. 
os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths.", "Params: ') for key, value in trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial):", "train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def test(device, test_loader, model,", "of finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value: ',", "train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss:", "{:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list, target_list,", "running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True) acc", "os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if __name__ == \"__main__\": opt() #main()", "inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets)", "optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to calculate score. output_list += [int(o.argmax())", "model.to(device) #print(model) # Set a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014,", "metric_fc(features, targets) loss = criterion(outputs, targets) # Set data to calculate score. 
output_list", "nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set a metric \"\"\" 'n_feats': 256,", "\"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.", "nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size,", "(n_batch * (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return acc, loss def", "directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert", "finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value: ', trial.value)", "{:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3},", "0 and batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3},", "step_size=args.step_size, gamma=args.gamma) # Train and test. for epoch in range(args.n_epoch): # Train and", "torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms", "len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader),", "metric_fc, criterion): model.eval() output_list = [] target_list = [] running_loss = 0.0 for", "https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats)", "function and optimization function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3,", "Set device. 
device = 'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset. if", "input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step", "type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr',", "# Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/')", "function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion =", "trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr,", "output_list = [] target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets)", "= metric_fc(features, targets) loss = criterion(outputs, targets) # Set data to calculate score.", "= calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100 == 0", "import torchvision.models as models def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective,", "running_loss, train_loader) return train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list", "'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss:", "') for key, value in trial.params.items(): print(' {}: {}'.format(key, value)) 
def objective(trial): #", "= inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs,", "loss = running_loss / (n_batch * (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset)", "default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581,", "n_batch, batch_idx, train_loader) if (batch_idx % 100 == 0 and batch_idx != 0)", "= train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device,", "!= 0) or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8},", "t in targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader)", "acc, loss def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501')", "print(' {}: {}'.format(key, value)) def objective(trial): # Parse arguments. args = parse_args() #", "default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\",", "from sklearn.metrics import classification_report import pandas as pd import optuna from datasets import", "{'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\"", "Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to calculate score. 
output_list", "or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss:", "optimizer.step() scheduler.step() # Set data to calculate score. output_list += [int(o.argmax()) for o", "enumerate(test_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device) features = model(inputs) outputs", "arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str,", "stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss))", "train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list =", "(batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}'", "targets] running_loss += loss.item() # Calculate score at present. train_acc, train_loss = calc_score(output_list,", "256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm", "= arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. assert", "target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader):", "optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. 
for epoch in range(args.n_epoch): # Train", "train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model,", "0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin", "False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm", "t in targets] running_loss += loss.item() # Calculate score at present. train_acc, train_loss", "loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list,", "import optuna from datasets import market1501 import metrics import torchvision.models as models def", "model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set a metric \"\"\"", "{:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss,", "targets) # Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to calculate", "0) or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train", "= criterion(outputs, targets) # Set data to calculate score. output_list += [int(o.argmax()) for", "round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx < len(data_loader.dataset): loss = running_loss", "Parse arguments. args = parse_args() # Set device. device = 'cuda' if torch.cuda.is_available()", "if torch.cuda.is_available() else 'cpu' # Load dataset. 
if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path)", "#print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train", "= round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6) if n_batch *", "os import argparse from sklearn.metrics import classification_report import pandas as pd import optuna", "target_list += [int(t) for t in targets] running_loss += loss.item() # Calculate score", "return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy.", "= models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set a", "torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F", "#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path))", "'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) #", "help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256,", "weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. for epoch in", "train_loss)) # Save a model checkpoint. 
#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path)", "{:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'", "optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial =", "import pandas as pd import optuna from datasets import market1501 import metrics import", "batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc:", "acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc,", "'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() #", "+= loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def", "help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning", "model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint", "model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and", "# Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if __name__", "in enumerate(test_loader): # Forward processing. 
inputs, targets = inputs.to(device), targets.to(device) features = model(inputs)", "train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100 ==", "[] target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader):", "in enumerate(train_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs)", "= argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32,", "arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats',", "test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list,", "train_loader) return train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list =", "epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base", "running_loss += loss.item() # Calculate score at present. train_acc, train_loss = calc_score(output_list, target_list,", "metric_fc(features, targets) loss = criterion(outputs, targets) # Backward processing. 
optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step()", "is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace:", "# Train and test a model. train_acc, train_loss = train(device, train_loader, args.n_batch, model,", "metric, criterion) # Output score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train", "model, metric, criterion) # Output score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8},", "= running_loss / len(data_loader.dataset) return acc, loss def parse_args(): # Set arguments. arg_parser", "', len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value: ', trial.value) print(' Params:", "features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Set", "metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay':", "target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100 == 0 and batch_idx", "default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate')", "model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Backward processing. optimizer.zero_grad()", "batch_idx, train_loader) if (batch_idx % 100 == 0 and batch_idx != 0) or", "train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score.", "pd import optuna from datasets import market1501 import metrics import torchvision.models as models", "print(' Value: ', trial.value) print(' Params: ') for key, value in trial.params.items(): print('", "= test(device, test_loader, model, metric, criterion) # Output score. 
#stdout_temp = 'epoch: {:>3},", "model.eval() output_list = [] target_list = [] running_loss = 0.0 for batch_idx, (inputs,", "market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf.", "criterion, optimizer, scheduler): model.train() output_list = [] target_list = [] running_loss = 0.0", "target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True)", "def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/')", "targets) in enumerate(test_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device) features =", "torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn", "default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\"", "= trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0,", "calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion):", "n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print('", "os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args", "batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing. 
inputs, targets = inputs.to(device), targets.to(device).long()", "running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): #", "targets) loss = criterion(outputs, targets) # Set data to calculate score. output_list +=", "trial.value) print(' Params: ') for key, value in trial.params.items(): print(' {}: {}'.format(key, value))", "# Set device. device = 'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset.", "= calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch,", "Output score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test", "#loss = round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx < len(data_loader.dataset): loss", "cudnn import torchvision import torchvision.transforms as transforms import os import argparse from sklearn.metrics", "as nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as", "'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3)", "= optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial", "6) if n_batch * batch_idx < len(data_loader.dataset): loss = running_loss / (n_batch *", "loss = criterion(outputs, targets) # Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set", "model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4,", "train_loader) if (batch_idx % 100 == 0 and batch_idx != 0) or (batch_idx", "= 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test", "Forward processing. 
inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features,", "Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model =", "train_acc, train_loss)) # Save a model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(),", "with PyTorch.''' import torch import torch.nn as nn import torch.optim as optim import", "test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc,", "arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv')", "def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list = []", "criterion(outputs, targets) # Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to", "running_loss / (n_batch * (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return acc,", "gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay':", "n_batch, batch_idx, data_loader): # Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True) acc =", "metric.to(device) # Set loss function and optimization function. 
lr = trial.suggest_uniform('lr', 1e-3, 1e-1)", "test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1,", "import argparse from sklearn.metrics import classification_report import pandas as pd import optuna from", "acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6) if n_batch", "base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay',", "torchvision import torchvision.transforms as transforms import os import argparse from sklearn.metrics import classification_report", "1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of", "= optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) #", "targets = inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features, targets) loss =", "Train and test a model. train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric,", "# Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6)", "targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets) loss =", "model, metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model, metric, criterion)", "checkpoint. 
#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at", "running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100 == 0 and batch_idx !=", "outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Set data to calculate", "'''Train CIFAR10 with PyTorch.''' import torch import torch.nn as nn import torch.optim as", "batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device)", "classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset),", "market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1,", "and optimization function. 
lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1)", "test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss,", "1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay)", "arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin')", "[] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing.", "= trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) #", "loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list,", "0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30)", "targets.to(device) features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) #", "# Load dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path,", "scheduler) #test_acc, test_loss = test(device, test_loader, model, metric, criterion) # Output score. #stdout_temp", "inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets) loss", "', trial.value) print(' Params: ') for key, value in trial.params.items(): print(' {}: {}'.format(key,", "objective(trial): # Parse arguments. args = parse_args() # Set device. 
device = 'cuda'", "for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing. inputs, targets = inputs.to(device),", "= round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx < len(data_loader.dataset): loss =", "is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input", "type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float,", "if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set", "'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0,", "loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc:", "exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. 
assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return", "assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if __name__ == \"__main__\": opt()", "[] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing.", "optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms", "CIFAR10 with PyTorch.''' import torch import torch.nn as nn import torch.optim as optim", "type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats':", "trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value: ', trial.value) print('", "in targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return", "default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1,", "6) #loss = round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx < len(data_loader.dataset):", "# Set loss function and optimization function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay", "# cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc =", "= 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss))", "test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. result =", "score. 
output_list += [int(o.argmax()) for o in outputs] target_list += [int(t) for t", "#torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device,", "argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int)", "as cudnn import torchvision import torchvision.transforms as transforms import os import argparse from", "0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) #", "enumerate(train_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs", "loss.item() # Calculate score at present. train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch,", "processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to calculate score. output_list +=", "= parse_args() # Set device. device = 'cuda' if torch.cuda.is_available() else 'cpu' #", "weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params':", "args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf. 
https://qiita.com/perrying/items/857df46bb6cdc3047bd8", "< len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1)) else: loss = running_loss", "== 0 and batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp = 'batch:", "{}: {}'.format(key, value)) def objective(trial): # Parse arguments. args = parse_args() # Set", "Train and test. for epoch in range(args.n_epoch): # Train and test a model.", "loss = running_loss / len(data_loader.dataset) return acc, loss def parse_args(): # Set arguments.", "# Calculate score at present. train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx,", "loss = criterion(outputs, targets) # Set data to calculate score. output_list += [int(o.argmax())", "arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float,", "model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set", "import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import", "avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx <", "1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin)", "for o in outputs] target_list += [int(t) for t in targets] running_loss +=", "= trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model =", "trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm,", "len(data_loader.dataset), 6) if n_batch * batch_idx < 
len(data_loader.dataset): loss = running_loss / (n_batch", "step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5,", "len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1)) else: loss = running_loss /", "os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir)", "import classification_report import pandas as pd import optuna from datasets import market1501 import", "= trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer", "present. train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx %", "= [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward", "output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6) if", "as pd import optuna from datasets import market1501 import metrics import torchvision.models as", "[256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model)", "test a model. train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer,", "{:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint. 
#model_ckpt_path", "(batch_idx % 100 == 0 and batch_idx != 0) or (batch_idx == len(train_loader)):", "study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial", "norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning", "import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as", "# Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data to calculate score.", "models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set a metric", "args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return", "criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model, metric, criterion) # Output", "loss: {:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss))", "+= [int(o.argmax()) for o in outputs] target_list += [int(t) for t in targets]", "default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str,", "(inputs, targets) in enumerate(train_loader): # Forward processing. 
inputs, targets = inputs.to(device), targets.to(device).long() features", "/ len(data_loader.dataset), 6) if n_batch * batch_idx < len(data_loader.dataset): loss = running_loss /", "'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30) margin", "acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint.", "256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args", "# Parse arguments. args = parse_args() # Set device. device = 'cuda' if", "metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and", "trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer =", "target_list, running_loss, train_loader) return train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval()", "as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import", "in outputs] target_list += [int(t) for t in targets] running_loss += loss.item() test_acc,", "type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float,", "= model.to(device) #print(model) # Set a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin':", "#print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch,", "targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc,", "0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. 
\"\"\" norm = trial.suggest_int('norm', 0, 30) margin =", "in trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial): # Parse arguments. args =", "[0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss", "loss def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir',", "type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10,", "import market1501 import metrics import torchvision.models as models def opt(): study = optuna.create_study(direction='maximize')", "print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list, target_list, running_loss,", "+= [int(t) for t in targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list,", "help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1", "a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True)", "= trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}],", "target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): #", "processing. inputs, targets = inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features, targets)", "model.train() output_list = [] target_list = [] running_loss = 0.0 for batch_idx, (inputs,", "# Save a model checkpoint. 
#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved", "metrics import torchvision.models as models def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize')", "train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list = [] target_list", "optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)", "score at present. train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if", "return acc, loss def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name',", "epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def", "import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms import os import", "output_list += [int(o.argmax()) for o in outputs] target_list += [int(t) for t in", "rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int,", "'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory. 
os.makedirs(args.anno_dir, exist_ok=True)", "type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int,", "0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing. inputs, targets =", "for key, value in trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial): # Parse", "Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if __name__ ==", "train_acc, train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return", "# Calculate score. #train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss", "running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing. inputs,", "'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. 
\"\"\" norm =", "model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Set data to", "arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='')", "train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}'", "train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss =", "calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx,", "value in trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial): # Parse arguments. args", "criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler =", "loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name,", "return train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list", "'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory.", "train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss", "arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. 
assert os.path.exists(args.data_dir)", "n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list = [] target_list = []", "default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth')", "test(device, test_loader, model, metric, criterion) # Output score. #stdout_temp = 'epoch: {:>3}, train", "else 'cpu' # Load dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names", "output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float,", "n_feats) model = model.to(device) #print(model) # Set a metric \"\"\" 'n_feats': 256, 'norm':", "gamma=args.gamma) # Train and test. for epoch in range(args.n_epoch): # Train and test", "margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats,", "for epoch in range(args.n_epoch): # Train and test a model. train_acc, train_loss =", "study.best_trial print(' Value: ', trial.value) print(' Params: ') for key, value in trial.params.items():", "in targets] running_loss += loss.item() # Calculate score at present. 
train_acc, train_loss =", "[int(t) for t in targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list,", "opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials:", "train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model", "a model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model", "train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100", "trial:') trial = study.best_trial print(' Value: ', trial.value) print(' Params: ') for key,", "easy_margin=easy_margin) metric.to(device) # Set loss function and optimization function. lr = trial.suggest_uniform('lr', 1e-3,", "== len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx,", "torchvision.transforms as transforms import os import argparse from sklearn.metrics import classification_report import pandas", "256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device) #print(model) #", "and batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp = 'batch: {:>3}/{:<3}, train", "running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing. inputs,", "import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as", "5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. 
\"\"\" norm = trial.suggest_int('norm',", "test_loader) return test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate", "= optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. for epoch in range(args.n_epoch): #", "1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function", "scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. for epoch in range(args.n_epoch):", "optuna from datasets import market1501 import metrics import torchvision.models as models def opt():", "criterion): model.eval() output_list = [] target_list = [] running_loss = 0.0 for batch_idx,", "# Forward processing. inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs =", "outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Backward processing. optimizer.zero_grad() loss.backward()", "features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Backward", "len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and optimization function. lr", "epoch in range(args.n_epoch): # Train and test a model. train_acc, train_loss = train(device,", "to calculate score. output_list += [int(o.argmax()) for o in outputs] target_list += [int(t)", "target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6)", "args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc", "= 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing. 
inputs, targets", "if n_batch * batch_idx < len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1))", "test(device, test_loader, model, metric_fc, criterion): model.eval() output_list = [] target_list = [] running_loss", "study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ',", "arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model", "number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is", "as models def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number", "# Set data to calculate score. output_list += [int(o.argmax()) for o in outputs]", "{:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp", "= [] target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets) in", "of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of", "models def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of", "(inputs, targets) in enumerate(test_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device) features", "acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. 
#train_acc,", "type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int,", "paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if __name__ == \"__main__\":", "30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric =", "'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True)", "'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin',", "Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\",", "len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value: ', trial.value) print(' Params: ')", "\"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}.", "help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The", "arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma',", "torch.backends.cudnn as cudnn import torchvision 
import torchvision.transforms as transforms import os import argparse", "{'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test.", "torchvision.models as models def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000)", "value)) def objective(trial): # Parse arguments. args = parse_args() # Set device. device", "PyTorch.''' import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional", "#train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def test(device, test_loader,", "help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr':", "False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. 
#", "n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model", "trial = study.best_trial print(' Value: ', trial.value) print(' Params: ') for key, value", "arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The", "of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False, 1 is True')", "type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning", "{:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint. #model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name,", "for t in targets] running_loss += loss.item() # Calculate score at present. train_acc,", "optimization function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion", "= running_loss / (n_batch * (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return", "range(args.n_epoch): # Train and test a model. train_acc, train_loss = train(device, train_loader, args.n_batch,", "0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin,", "class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf. 
https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats =", "for t in targets] running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss,", "+= loss.item() # Calculate score at present. train_acc, train_loss = calc_score(output_list, target_list, running_loss,", "test. for epoch in range(args.n_epoch): # Train and test a model. train_acc, train_loss", "= optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials))", "import torchvision import torchvision.transforms as transforms import os import argparse from sklearn.metrics import", "{}'.format(key, value)) def objective(trial): # Parse arguments. args = parse_args() # Set device.", "#print(model) # Set a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin':", "of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate:", "#test_acc, test_loss = test(device, test_loader, model, metric, criterion) # Output score. #stdout_temp =", "if (batch_idx % 100 == 0 and batch_idx != 0) or (batch_idx ==", "args = parse_args() # Set device. device = 'cuda' if torch.cuda.is_available() else 'cpu'", "# Forward processing. 
inputs, targets = inputs.to(device), targets.to(device) features = model(inputs) outputs =", "arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin',", "target_list += [int(t) for t in targets] running_loss += loss.item() test_acc, test_loss =", "default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008,", "= 'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset. if os.path.exists(args.anno_path) == False:", "trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set", "# Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model", "in range(args.n_epoch): # Train and test a model. 
train_acc, train_loss = train(device, train_loader,", "return train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list = []", "default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of", "help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1,", "Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm':", "model, metric_fc, criterion, optimizer, scheduler): model.train() output_list = [] target_list = [] running_loss", "test_loader, model, metric, criterion) # Output score. #stdout_temp = 'epoch: {:>3}, train acc:", "= metric_fc(features, targets) loss = criterion(outputs, targets) # Backward processing. 
optimizer.zero_grad() loss.backward() optimizer.step()", "[int(o.argmax()) for o in outputs] target_list += [int(t) for t in targets] running_loss", "sklearn.metrics import classification_report import pandas as pd import optuna from datasets import market1501", "default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size',", "default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12,", "test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train", "train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list = [] target_list =", "targets) # Set data to calculate score. output_list += [int(o.argmax()) for o in", "train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc,", "{:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch: {:>3}, train acc: {:<8},", "exist_ok=True) # Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir) assert os.path.exists(args.model_ckpt_dir) return args if", "1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params':", "def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. 
result = classification_report(output_list,", "as transforms import os import argparse from sklearn.metrics import classification_report import pandas as", "inputs, targets = inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features, targets) loss", "optimizer, scheduler): model.train() output_list = [] target_list = [] running_loss = 0.0 for", "model. train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc,", "#print('') return train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train()", "Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. assert os.path.exists(args.data_dir) assert os.path.exists(args.anno_dir)", "metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and optimization function.", "cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048,", "train loss: {:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc,", "= 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) #", "test_loader, model, metric_fc, criterion): model.eval() output_list = [] target_list = [] running_loss =", "len(data_loader.dataset) return acc, loss def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\")", "= market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats',", "Save a model checkpoint. 
#model_ckpt_path = args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a", "checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion,", "as F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms import", "# Train and test. for epoch in range(args.n_epoch): # Train and test a", "target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): #", "scheduler.step() # Set data to calculate score. output_list += [int(o.argmax()) for o in", "feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size')", "#stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8},", "arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str,", "arguments. args = parse_args() # Set device. device = 'cuda' if torch.cuda.is_available() else", "{}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler):", "loss function and optimization function. lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay',", "test_loss = test(device, test_loader, model, metric, criterion) # Output score. 
#stdout_temp = 'epoch:", "#study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best trial:')", "and test a model. train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion,", "trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial): # Parse arguments. args = parse_args()", "margin') arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate:", "Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862,", "scheduler): model.train() output_list = [] target_list = [] running_loss = 0.0 for batch_idx,", "train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats", "key, value in trial.params.items(): print(' {}: {}'.format(key, value)) def objective(trial): # Parse arguments.", "/ (n_batch * (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return acc, loss", "Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss", "result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss", "'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset. 
if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir,", "arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch',", "train_acc, train_loss def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list = [] target_list", "= nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer,", "0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir,", "print(' Params: ') for key, value in trial.params.items(): print(' {}: {}'.format(key, value)) def", "acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp = 'epoch:", "os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a", "nn import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn", "help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace: margin') arg_parser.add_argument('--step_size', default=10, type=int,", "test_acc, test_loss def calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. 
result", "help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int,", "== False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) # Set a model.", "= model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Backward processing.", "100 == 0 and batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp =", "torch.cuda.is_available() else 'cpu' # Load dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader,", "* (batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return acc, loss def parse_args():", "train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a model checkpoint. #model_ckpt_path =", "'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\"", "from datasets import market1501 import metrics import torchvision.models as models def opt(): study", "= nn.Linear(2048, n_feats) model = model.to(device) #print(model) # Set a metric \"\"\" 'n_feats':", "targets) loss = criterion(outputs, targets) # Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() #", "import os import argparse from sklearn.metrics import classification_report import pandas as pd import", "processing. inputs, targets = inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets)", "# Output score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8},", "% 100 == 0 and batch_idx != 0) or (batch_idx == len(train_loader)): stdout_temp", "parse_args() # Set device. 
device = 'cuda' if torch.cuda.is_available() else 'cpu' # Load", "args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model, metric,", "s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and optimization function. lr =", "a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch, model,", "/ len(data_loader.dataset) return acc, loss def parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image", "train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler) #test_acc, test_loss = test(device, test_loader,", "help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature') arg_parser.add_argument('--margin', default=0.0008, type=float, help='ArcFace:", "argparse from sklearn.metrics import classification_report import pandas as pd import optuna from datasets", "arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch')", "\"\"\" args = arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate", "{:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. #train_acc, train_loss", "for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing. 
inputs, targets = inputs.to(device),", "{:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save a", "trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1])", "o in outputs] target_list += [int(t) for t in targets] running_loss += loss.item()", "parse_args(): # Set arguments. arg_parser = argparse.ArgumentParser(description=\"Image Classification\") arg_parser.add_argument('--dataset_name', default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir',", "0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward processing. inputs, targets =", "default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin':", "and test. for epoch in range(args.n_epoch): # Train and test a model. train_acc,", "dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch) #", "type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number", "in outputs] target_list += [int(t) for t in targets] running_loss += loss.item() #", "at present. train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx", "len(train_loader), train_acc, train_loss)) # Calculate score. #train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader)", "model. # cf. 
https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc", "Load dataset. if os.path.exists(args.anno_path) == False: market1501.make_train_anno(args.data_dir, args.anno_path) train_loader, class_names = market1501.load_train_data(args.anno_path, args.n_batch)", "transforms import os import argparse from sklearn.metrics import classification_report import pandas as pd", "model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch, model, metric_fc,", "5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args()", "m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and optimization function. lr = trial.suggest_uniform('lr',", "data to calculate score. output_list += [int(o.argmax()) for o in outputs] target_list +=", "= [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader): # Forward", "easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device)", "n_batch * batch_idx < len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1)) else:", "train_loader, n_batch, model, metric_fc, criterion, optimizer, scheduler): model.train() output_list = [] target_list =", "inputs.to(device), targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets)", "= metrics.ArcMarginProduct(n_feats, len(class_names), s=norm, m=margin, easy_margin=easy_margin) metric.to(device) # Set loss function and optimization", "targets) in enumerate(train_loader): # Forward processing. inputs, targets = inputs.to(device), targets.to(device).long() features =", "score. 
#train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def test(device,", "batch_idx < len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1)) else: loss =", "Set a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr':", "1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler", "round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss / len(data_loader.dataset), 6) if n_batch * batch_idx", "torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import torchvision", "model = model.to(device) #print(model) # Set a metric \"\"\" 'n_feats': 256, 'norm': 5,", "arg_parser.add_argument('--step_size', default=10, type=int, help='Learning Rate: step size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma')", "0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make", "model, metric_fc, criterion): model.eval() output_list = [] target_list = [] running_loss = 0.0", "Calculate score at present. 
train_acc, train_loss = calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader)", "classification_report import pandas as pd import optuna from datasets import market1501 import metrics", "def opt(): study = optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished", "test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1, train_acc, train_loss, test_acc, test_loss)) stdout_temp =", "outputs] target_list += [int(t) for t in targets] running_loss += loss.item() # Calculate", "F import torch.backends.cudnn as cudnn import torchvision import torchvision.transforms as transforms import os", "else: loss = running_loss / len(data_loader.dataset) return acc, loss def parse_args(): # Set", "lr = trial.suggest_uniform('lr', 1e-3, 1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss()", "import torchvision.transforms as transforms import os import argparse from sklearn.metrics import classification_report import", "Value: ', trial.value) print(' Params: ') for key, value in trial.params.items(): print(' {}:", "args.n_batch) # Set a model. # cf. https://qiita.com/perrying/items/857df46bb6cdc3047bd8 n_feats = trial.suggest_categorical('n_feats', [256*1, 256*2])", "Forward processing. 
inputs, targets = inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features,", "optimizer, scheduler) #test_acc, test_loss = test(device, test_loader, model, metric, criterion) # Output score.", "= calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def test(device, test_loader, model, metric_fc,", "arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/')", "= trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric = metrics.ArcMarginProduct(n_feats, len(class_names),", "[] target_list = [] running_loss = 0.0 for batch_idx, (inputs, targets) in enumerate(test_loader):", "running_loss / len(data_loader.dataset) return acc, loss def parse_args(): # Set arguments. arg_parser =", "running_loss += loss.item() test_acc, test_loss = calc_score(output_list, target_list, running_loss, test_loader) return test_acc, test_loss", "type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1,", "metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. 
for", "calc_score(output_list, target_list, running_loss, n_batch, batch_idx, train_loader) if (batch_idx % 100 == 0 and", "= inputs.to(device), targets.to(device) features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs,", "type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is False,", "type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0,", "= criterion(outputs, targets) # Backward processing. optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() # Set data", "a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862,", "criterion) # Output score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss:", "'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0, 'lr': 0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args =", "import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn import", "print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print(' Value:", "Calculate score. 
#train_acc, train_loss = calc_score(output_list, target_list, running_loss, train_loader) return train_acc, train_loss def", "= study.best_trial print(' Value: ', trial.value) print(' Params: ') for key, value in", "datasets import market1501 import metrics import torchvision.models as models def opt(): study =", "norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin',", "{:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate", "stdout_temp = 'batch: {:>3}/{:<3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc,", "a model. train_acc, train_loss = train(device, train_loader, args.n_batch, model, metric, criterion, optimizer, scheduler)", "# Set a metric \"\"\" 'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014, 'easy_margin': 0,", "def test(device, test_loader, model, metric_fc, criterion): model.eval() output_list = [] target_list = []", "default=581, type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0 is", "= model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) # Set data", "type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin',", "arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin': 0.0005883992558471014,", "targets.to(device).long() features = model(inputs) outputs = metric_fc(features, targets) loss = criterion(outputs, targets) #", "train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}' #print(stdout_temp.format(epoch+1,", "import metrics import torchvision.models as models def opt(): study = 
optuna.create_study(direction='maximize') #study =", "at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader, n_batch, model, metric_fc, criterion, optimizer,", "0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" args = arg_parser.parse_args() # Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir,", "* batch_idx < len(data_loader.dataset): loss = running_loss / (n_batch * (batch_idx+1)) else: loss", "\"\"\" norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin =", "default='Market1501') arg_parser.add_argument('--data_dir', default='D:/workspace/datasets/Market-1501-v15.09.15/') arg_parser.add_argument('--anno_dir', default='../data/annos/') arg_parser.add_argument('--anno_path', default='../data/annos/anno_market1501_train.csv') arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50')", "batch_idx, data_loader): # Calculate accuracy. 
result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted", "= classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss = round(running_loss /", "pandas as pd import optuna from datasets import market1501 import metrics import torchvision.models", "model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('') return train_acc def train(device, train_loader,", "(batch_idx+1)) else: loss = running_loss / len(data_loader.dataset) return acc, loss def parse_args(): #", "default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output')", "default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch', default=12, type=int, help='The number of epoch') arg_parser.add_argument('--lr', default=0.001,", "True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm', default=1, type=int, help='ArcFace: norm of input feature')", "= 0.0 for batch_idx, (inputs, targets) in enumerate(train_loader): # Forward processing. inputs, targets", "= args.model_ckpt_path_temp.format(args.dataset_name, args.model_name, epoch+1) #torch.save(model.state_dict(), model_ckpt_path) #print('Saved a model checkpoint at {}'.format(model_ckpt_path)) #print('')", "0.08620634410578862, 'weight_decay': 0.009787166658749052}. \"\"\" norm = trial.suggest_int('norm', 0, 30) margin = trial.suggest_uniform('margin', 0.0,", "train loss: {:<8}' print(stdout_temp.format(batch_idx, len(train_loader), train_acc, train_loss)) # Calculate score. 
#train_acc, train_loss =", "optuna.create_study(direction='maximize') #study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=1000) print('Number of finished trials: ', len(study.trials)) print('Best", "optim.SGD([{'params': model.parameters()}, {'params': metric.parameters()}], lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train", "+= [int(t) for t in targets] running_loss += loss.item() # Calculate score at", "calc_score(output_list, target_list, running_loss, n_batch, batch_idx, data_loader): # Calculate accuracy. result = classification_report(output_list, target_list,", "device = 'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset. if os.path.exists(args.anno_path) ==", "Set data to calculate score. output_list += [int(o.argmax()) for o in outputs] target_list", "default=1, type=int, help='0 is False, 1 is True') arg_parser.add_argument('--weight_decay', default=5e-4, type=float, help='') arg_parser.add_argument('--norm',", "accuracy. result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'], 6) #loss =", "'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}' print(stdout_temp.format(epoch+1, train_acc, train_loss)) # Save", "[int(t) for t in targets] running_loss += loss.item() # Calculate score at present.", "arg_parser.add_argument('--n_feats', default=581, type=int, help='The number of base model output') arg_parser.add_argument('--easy_margin', default=1, type=int, help='0", "criterion(outputs, targets) # Set data to calculate score. 
output_list += [int(o.argmax()) for o", "print('Best trial:') trial = study.best_trial print(' Value: ', trial.value) print(' Params: ') for", "trial.suggest_categorical('n_feats', [256*1, 256*2]) model = models.resnet50(pretrained=True) model.fc = nn.Linear(2048, n_feats) model = model.to(device)", "market1501 import metrics import torchvision.models as models def opt(): study = optuna.create_study(direction='maximize') #study", "number of epoch') arg_parser.add_argument('--lr', default=0.001, type=float, help='Learning rate') arg_parser.add_argument('--n_feats', default=581, type=int, help='The number", "score. #stdout_temp = 'epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc:", "calculate score. output_list += [int(o.argmax()) for o in outputs] target_list += [int(t) for", "device. device = 'cuda' if torch.cuda.is_available() else 'cpu' # Load dataset. if os.path.exists(args.anno_path)", "lr=lr, weight_decay=weight_decay) scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) # Train and test. for epoch", "# Make directory. os.makedirs(args.anno_dir, exist_ok=True) os.makedirs(args.model_ckpt_dir, exist_ok=True) # Validate paths. 
assert os.path.exists(args.data_dir) assert", "1e-1) weight_decay = trial.suggest_uniform('weight_decay', 1e-3, 1e-1) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD([{'params': model.parameters()},", "0, 30) margin = trial.suggest_uniform('margin', 0.0, 1e-3) easy_margin = trial.suggest_categorical('easy_margin', [0, 1]) metric", "arg_parser.add_argument('--n_batch', default=32, type=int) arg_parser.add_argument(\"--model_name\", type=str, default='ResNet50') arg_parser.add_argument(\"--model_ckpt_dir\", type=str, default='../experiments/models/checkpoints/') arg_parser.add_argument(\"--model_ckpt_path_temp\", type=str, default='../experiments/models/checkpoints/{}_{}_epoch={}.pth') arg_parser.add_argument('--n_epoch',", "metric_fc, criterion, optimizer, scheduler): model.train() output_list = [] target_list = [] running_loss =", "data_loader): # Calculate accuracy. result = classification_report(output_list, target_list, output_dict=True) acc = round(result['weighted avg']['f1-score'],", "outputs] target_list += [int(t) for t in targets] running_loss += loss.item() test_acc, test_loss", "size') arg_parser.add_argument('--gamma', default=0.1, type=float, help='Learning Rate: gamma') \"\"\" {'n_feats': 256, 'norm': 5, 'margin':" ]
[ "Random Image Of Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "Random Image Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "Random Image Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await", "\"\"\"Gives You Random Image Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives", "as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em =", "color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async", "def kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async with ctx.channel.typing(): async", "Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with 
aiohttp.ClientSession() as cs:", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random Image Of", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data", "as r: data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em =", "Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox')", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random Image", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r:", "with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em = discord.Embed( title='Red Panda',", "self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data =", "cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em = discord.Embed(", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await", "commands.BucketType.user) async def panda(self, 
ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async with", "with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at,", "Image Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data = await", "em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "\"\"\"Gives You Random Image Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random Image Of", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json()", "# em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url) # await ctx.send(embed=em) def setup(bot): bot.add_cog(Image(bot))", "async def red_panda(self, ctx): \"\"\"Gives You Random Image Of Red Panda\"\"\" async with", "\"\"\"Gives You Random Image Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF Of", "@commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random Image Of Red", "Image Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async 
with", "aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r:", "r: data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url,", "as r: data = await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json()", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx):", "commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async with", "async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em = discord.Embed( title='kola',", "r: data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url,", "= discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx):", "em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # 
em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url) #", "discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em = discord.Embed(", "@commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\"", "ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "Of Red Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data =", "timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user)", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data =", "async with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em = discord.Embed( title='Panda',", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You Random Image Of", "data = await r.json() em = discord.Embed( title='Fox', 
timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json()", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data =", "ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "def pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\" async with", "\"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random", "On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) #", "color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async", "aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em", "as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em =", "@commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json()", "async def bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async with ctx.channel.typing():", "= discord.Embed( title='Cat', timestamp=ctx.message.created_at, 
color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "__init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx):", "async def cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async with ctx.channel.typing():", "as r: data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color)", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r:", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random", "dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async with ctx.channel.typing(): async with", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r:", "aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r: data = await r.json() em", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = await", "import commands import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot", "= await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "discord.ext import commands import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot):", "with 
ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data", "Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json')", "commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async with", "r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random Image", "\"\"\"Gives You Random Image Of Red Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10,", "r: data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "= f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by", "as r: data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file'])", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random", "cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color)", "with aiohttp.ClientSession() as cs: async with 
cs.get('http://random.dog/woof.json') as r: data = await r.json()", "ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You Random Image", "r: data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user)", "pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing():", "Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda')", "\"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color)", "= discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async", "red_panda(self, ctx): \"\"\"Gives You Random Image Of Red Panda\"\"\" async with ctx.channel.typing(): async", "Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await 
ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "as r: data = await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "Random Image Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "You Random Image Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives", "async with cs.get('http://random.dog/woof.json') as r: data = await r.json() em = discord.Embed( title='Dog',", "ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "Random Image Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data =", "await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "ctx): \"\"\"Gives You Random Image Of Red Panda\"\"\" async with ctx.channel.typing(): async with", "title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "You Random Image Of Red Panda\"\"\" async with ctx.channel.typing(): async 
with aiohttp.ClientSession() as", "r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx):", "10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async", "{ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str):", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx):", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data", "aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em", "@commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\"", "cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color)", "Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random Image Of", "async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em = discord.Embed( 
title='Red", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random Image Of", "yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color", "By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def", "await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at,", "async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data", "Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/fox') as", "commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\"", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data =", "You Random Image Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "aiohttp.ClientSession() as cs: async with 
cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self,", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self,", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random Image", "discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url) # await ctx.send(embed=em)", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random Image Of", "bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async with ctx.channel.typing(): async with", "Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as", "bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives", "color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You", "= discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) 
@commands.command()", "async def fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async with ctx.channel.typing():", "aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em", "ctx): \"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async", "cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color)", "commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async with", "def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self,", "\"\"\"Gives You Random Image Of Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json()", "await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "Image Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async with ctx.channel.typing(): async with", "data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You", "timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") 
await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user)", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx):", "em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user)", "import discord from discord.ext import commands import aiohttp import requests class Image(commands.Cog, name='Image'):", "cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em = discord.Embed(", "GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "Image Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em = discord.Embed(", "await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): #", "with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at,", "r: data = await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "with cs.get('http://random.dog/woof.json') as r: data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at,", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random Image Or", "{ctx.author.name}\") await ctx.send(embed=em) 
@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You", "@commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\"", "commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\"", "10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async", "discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color)", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You", "Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as", "async def panda(self, ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async with ctx.channel.typing():", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self,", "@commands.cooldown(1, 10, commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\"", "= await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "Of 
Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow')", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await", "await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async with ctx.channel.typing(): async with", "title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "@commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\"", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r: data", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self,", "as cs: async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em =", "em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) 
em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives", "async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r: data = await", "with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at,", "@commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random Image Of", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self,", "# \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color =", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx):", "panda(self, ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async with ctx.channel.typing(): async with", "data = await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives", "title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) 
em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() #", "def bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async with ctx.channel.typing(): async", "ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url) # await ctx.send(embed=em) def setup(bot):", "def dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async with ctx.channel.typing(): async", "cs: async with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em = discord.Embed(", "color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10,", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def panda(self,", "r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "# em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url)", "Image Of Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "= await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "You Random Image Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "= discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", 
icon_url=ctx.author.avatar_url) # await", "cs.get('http://random.dog/woof.json') as r: data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color)", "as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em =", "# @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On", "Image Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random Image", "Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb')", "Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url)", "commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random Image Of Red Panda\"\"\" async", "def fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async with ctx.channel.typing(): async", "data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random Image", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r:", "async with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em = discord.Embed( title='Cat',", "title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) 
em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala')", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data", "title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You Random", "= discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "= await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "# async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" #", "@commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\"", "with cs.get('https://some-random-api.ml/img/panda') as r: data = await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at,", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def 
fox(self, ctx): \"\"\"Gives You", "= await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def", "You Random Image Of Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\",", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as r: data =", "data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async with ctx.channel.typing(): async with", "# @commands.cooldown(1, 10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" #", "You Random Image Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested 
By {ctx.author.name}\")", "timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1,", "cs.get('http://aws.random.cat/meow') as r: data = await r.json() em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color)", "r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "Random Image Of Red Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\")", "10, commands.BucketType.user) async def bird(self, ctx): \"\"\"Gives You Random Image Of Bird\"\"\" async", "data = await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "def cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async with ctx.channel.typing(): async", "# url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em = discord.Embed(color = ctx.author.color) # em.set_image(url=url) #", "data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "as r: data = await r.json() em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "= 
discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) #", "Random Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json() em = discord.Embed( title='Bird',", "as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em =", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r:", "= discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "{ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives You", "def panda(self, ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async with ctx.channel.typing(): async", "em = discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "Image Of Red Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def", "with aiohttp.ClientSession() as cs: async with cs.get('http://aws.random.cat/meow') as r: data = await r.json()", "def red_panda(self, ctx): \"\"\"Gives You Random Image Of Red 
Panda\"\"\" async with ctx.channel.typing():", "discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "@commands.cooldown(1, 10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You Random Image Of Panda\"\"\"", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r: data", "You Random Image Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot", "= discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command()", "name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def", "with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em = discord.Embed( title='kola', timestamp=ctx.message.created_at,", "from discord.ext import commands import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self,", "as r: data = await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color)", "async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em = discord.Embed( title='Pikachu',", "em = discord.Embed( title='Pikachu', 
timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command() # @commands.cooldown(1, 10, commands.BucketType.user) #", "@commands.cooldown(1, 10, commands.BucketType.user) async def pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF", "with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await r.json()", "bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random Image", "async def pikachu(self, ctx): \"\"\"Gives You Random Image Or GIF Of Pikachu\"\"\" async", "as cs: async with cs.get('http://aws.random.cat/meow') as r: data = await r.json() em =", "commands.BucketType.user) async def fox(self, ctx): \"\"\"Gives You Random Image Of Fox\"\"\" async with", "discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "Dog\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('http://random.dog/woof.json') as", "cs: async with cs.get('https://some-random-api.ml/img/koala') as r: data = await r.json() em = discord.Embed(", "em = discord.Embed( title='Cat', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "async def dog(self, ctx): \"\"\"Gives You Random Image Of Dog\"\"\" async with ctx.channel.typing():", "r.json() em = discord.Embed( title='Cat', 
timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "cs: async with cs.get('http://random.dog/woof.json') as r: data = await r.json() em = discord.Embed(", "r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async with", "r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await", "= await r.json() em = discord.Embed( title='Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) # @commands.command()", "ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data =", "10, commands.BucketType.user) async def red_panda(self, ctx): \"\"\"Gives You Random Image Of Red Panda\"\"\"", "r: data = await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link'])", "= await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) 
em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By", "as r: data = await r.json() em = discord.Embed( title='Dog', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['url'])", "Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as", "Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async", "Of Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu')", "Red Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda')", "discord from discord.ext import commands import aiohttp import requests class Image(commands.Cog, name='Image'): def", "aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/pikachu') as r: data = await r.json() em", "em.set_image(url=data['url']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def", "async def kola(self, ctx): \"\"\"Gives You Random Image Of Kola\"\"\" async with ctx.channel.typing():", "You Random Image Of Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs:", "Or GIF Of Pikachu\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def fox(self,", "= bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random", "title='Cat', 
timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['file']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "10, commands.BucketType.user) async def cat(self, ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async", "10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You Random Image Of Panda\"\"\" async", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r:", "ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def kola(self, ctx): \"\"\"Gives You Random Image", "async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data = await", "title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10,", "10, commands.BucketType.user) # async def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url =", "cs: async with cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em = discord.Embed(", "\"\"\"Gives You Random Image Of Bird\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as", "discord.Embed( title='Bird', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, commands.BucketType.user) async def dog(self, ctx): \"\"\"Gives", "await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 
10, commands.BucketType.user) async def panda(self, ctx): \"\"\"Gives You Random", "= await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested", "r: data = await r.json() em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "= ctx.author.color) # em.set_image(url=url) # em.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url) # await ctx.send(embed=em) def", "async with cs.get('https://some-random-api.ml/img/fox') as r: data = await r.json() em = discord.Embed( title='Fox',", "class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 10, commands.BucketType.user)", "ctx): \"\"\"Gives You Random Image Of Cat\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession()", "r: data = await r.json() em = discord.Embed( title='Pikachu', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url,", "as cs: async with cs.get('http://random.dog/woof.json') as r: data = await r.json() em =", "em = discord.Embed( title='Fox', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em)", "async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as r:", "Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/red_panda') as", "text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1, 10, 
commands.BucketType.user) async def fox(self, ctx):", "import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1,", "Random Image Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async", "Panda\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/panda') as", "aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot = bot @commands.command()", "cs.get('https://some-random-api.ml/img/red_panda') as r: data = await r.json() em = discord.Embed( title='Red Panda', timestamp=ctx.message.created_at,", "discord.Embed( title='kola', timestamp=ctx.message.created_at, color=self.bot.color) em.set_image(url=data['link']) em.set_footer(icon_url=ctx.author.avatar_url, text=f\"Requested By {ctx.author.name}\") await ctx.send(embed=em) @commands.command() @commands.cooldown(1,", "commands import aiohttp import requests class Image(commands.Cog, name='Image'): def __init__(self, bot): self.bot =", "with ctx.channel.typing(): async with aiohttp.ClientSession() as cs: async with cs.get('https://some-random-api.ml/img/birb') as r: data", "def yt(self,ctx,comment:str): # \"\"\"Comments On Youtube\"\"\" # url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={ctx.author.avatar_url_as(format='png')}&username={ctx.author}&comment={comment}\" # em =", "\"\"\"Gives You Random Image Of Fox\"\"\" async with ctx.channel.typing(): async with aiohttp.ClientSession() as" ]
[]
[ "len(names) > 1: self.suffix = names[-1] if not self.hash_value and self.obj: content =", "not (obj or hash_value): raise ValueError(\"obj and hash_value both are None\") self.obj =", "self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range) raise Exception(\"can not find content\")", "self.name = name self.suffix = \"\" self.length = 0 self.hash_value = hash_value self.storage", "os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return", "if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None): if self.obj: return", "if len(names) > 1: self.suffix = names[-1] if not self.hash_value and self.obj: content", "content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj or", "self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if self.suffix:", "names[-1] if not self.hash_value and self.obj: content = self.obj.read() self.length = len(content) self.hash_value", "None\") self.obj = obj self.name = name self.suffix = \"\" self.length = 0", "> 1: self.suffix = names[-1] if not self.hash_value and self.obj: content = self.obj.read()", "not self.hash_value and self.obj: content = self.obj.read() self.length = len(content) self.hash_value = content_md5(content)", "def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj or hash_value): raise ValueError(\"obj", "self.hash_value = hash_value self.storage = storage names = name.split(\".\") if len(names) > 1:", "__str__(self): return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def", "both are None\") self.obj = obj self.name = name self.suffix = \"\" self.length", "if not self.hash_value and self.obj: content 
= self.obj.read() self.length = len(content) self.hash_value =", "= name.split(\".\") if len(names) > 1: self.suffix = names[-1] if not self.hash_value and", "= 0 self.hash_value = hash_value self.storage = storage names = name.split(\".\") if len(names)", "0 self.hash_value = hash_value self.storage = storage names = name.split(\".\") if len(names) >", "are None\") self.obj = obj self.name = name self.suffix = \"\" self.length =", "= obj self.name = name self.suffix = \"\" self.length = 0 self.hash_value =", "if not (obj or hash_value): raise ValueError(\"obj and hash_value both are None\") self.obj", "hash_value self.storage = storage names = name.split(\".\") if len(names) > 1: self.suffix =", "class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj or hash_value):", "\".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None): if self.obj: return self.obj.read() if self.storage:", "hash_value=None, storage=None): if not (obj or hash_value): raise ValueError(\"obj and hash_value both are", "def __str__(self): return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value", "= hash_value self.storage = storage names = name.split(\".\") if len(names) > 1: self.suffix", "content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value,", "hash_value both are None\") self.obj = obj self.name = name self.suffix = \"\"", "import content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj", "storage=None): if not (obj or hash_value): raise ValueError(\"obj and hash_value both are None\")", "= content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if self.suffix: return", "= names[-1] if not self.hash_value and 
self.obj: content = self.obj.read() self.length = len(content)", "return self.hash_value def content(self, range=None): if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(),", "self.storage = storage names = name.split(\".\") if len(names) > 1: self.suffix = names[-1]", "self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None): if self.obj: return self.obj.read()", "self.suffix = names[-1] if not self.hash_value and self.obj: content = self.obj.read() self.length =", "ValueError(\"obj and hash_value both are None\") self.obj = obj self.name = name self.suffix", "or hash_value): raise ValueError(\"obj and hash_value both are None\") self.obj = obj self.name", "names = name.split(\".\") if len(names) > 1: self.suffix = names[-1] if not self.hash_value", "len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if", "(obj or hash_value): raise ValueError(\"obj and hash_value both are None\") self.obj = obj", "self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix])", "= len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def key(self):", "obj self.name = name self.suffix = \"\" self.length = 0 self.hash_value = hash_value", "1: self.suffix = names[-1] if not self.hash_value and self.obj: content = self.obj.read() self.length", "name=\"\", obj=None, hash_value=None, storage=None): if not (obj or hash_value): raise ValueError(\"obj and hash_value", "key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None): if self.obj:", "self.hash_value and self.obj: content = self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0,", 
"name.split(\".\") if len(names) > 1: self.suffix = names[-1] if not self.hash_value and self.obj:", "= name self.suffix = \"\" self.length = 0 self.hash_value = hash_value self.storage =", "import os from oss_client.utils import content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None,", "= \"\" self.length = 0 self.hash_value = hash_value self.storage = storage names =", "self.length = 0 self.hash_value = hash_value self.storage = storage names = name.split(\".\") if", "os from oss_client.utils import content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None):", "from oss_client.utils import content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if", "def content(self, range=None): if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range) raise", "\"\" self.length = 0 self.hash_value = hash_value self.storage = storage names = name.split(\".\")", "return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None): if self.obj: return self.obj.read() if", "self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value def", "hash_value): raise ValueError(\"obj and hash_value both are None\") self.obj = obj self.name =", "self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self, range=None):", "range=None): if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range) raise Exception(\"can not", "__init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj or hash_value): raise ValueError(\"obj and", "FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not (obj or hash_value): raise", "content = 
self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self):", "self.hash_value def content(self, range=None): if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range)", "self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return self.hash_value", "= self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def __str__(self): return", "storage names = name.split(\".\") if len(names) > 1: self.suffix = names[-1] if not", "obj=None, hash_value=None, storage=None): if not (obj or hash_value): raise ValueError(\"obj and hash_value both", "self.obj = obj self.name = name self.suffix = \"\" self.length = 0 self.hash_value", "return self.hash_value def key(self): if self.suffix: return \".\".join([self.hash_value, self.suffix]) return self.hash_value def content(self,", "self.suffix]) return self.hash_value def content(self, range=None): if self.obj: return self.obj.read() if self.storage: return", "self.suffix = \"\" self.length = 0 self.hash_value = hash_value self.storage = storage names", "oss_client.utils import content_md5 class FileObject(object): def __init__(self, name=\"\", obj=None, hash_value=None, storage=None): if not", "if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range) raise Exception(\"can not find", "raise ValueError(\"obj and hash_value both are None\") self.obj = obj self.name = name", "= storage names = name.split(\".\") if len(names) > 1: self.suffix = names[-1] if", "content(self, range=None): if self.obj: return self.obj.read() if self.storage: return self.storage.read(self.key(), range) raise Exception(\"can", "and hash_value both are None\") self.obj = obj self.name = name self.suffix =", "def key(self): if self.suffix: return \".\".join([self.hash_value, 
self.suffix]) return self.hash_value def content(self, range=None): if", "and self.obj: content = self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET)", "self.obj: content = self.obj.read() self.length = len(content) self.hash_value = content_md5(content) self.obj.seek(0, os.SEEK_SET) def", "name self.suffix = \"\" self.length = 0 self.hash_value = hash_value self.storage = storage" ]
[ "Cooldown(0, per, type) return func return decorator def humanbytes(B): \"Return the given bytes", "a human friendly KB, MB, GB, or TB string\" B = float(B) KB", "< TB: return \"{0:.2f} GB\".format(B / GB) elif TB <= B: return \"{0:.2f}", "< MB: return \"{0:.2f} KB\".format(B / KB) elif MB <= B < GB:", "return \"{0:.2f} KB\".format(B / KB) elif MB <= B < GB: return \"{0:.2f}", "per, type) else: func.__commands_cooldown__ = Cooldown(0, per, type) return func return decorator def", "** 2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB =", "if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0, per,", "MB <= B < GB: return \"{0:.2f} MB\".format(B / MB) elif GB <=", "return \"{0} {1}\".format(B, \"Bytes\" if 0 == B > 1 else \"Byte\") elif", "float(KB ** 2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB", "return \"{0:.2f} GB\".format(B / GB) elif TB <= B: return \"{0:.2f} TB\".format(B /", "= Cooldown(0, per, type) return func return decorator def humanbytes(B): \"Return the given", "float(B) KB = float(1024) MB = float(KB ** 2) # 1,048,576 GB =", "< KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 == B > 1 else", "import Command def cooldoown(rate, per, type=BucketType.default, premium: bool = False): def decorator(func): if", "def cooldoown(rate, per, type=BucketType.default, premium: bool = False): def decorator(func): if isinstance(func, Command):", "or TB string\" B = float(B) KB = float(1024) MB = float(KB **", "func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0, per, type) return func", "= float(1024) MB = float(KB ** 2) # 1,048,576 GB = float(KB **", "GB = float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4) #", "func.__commands_cooldown__ = Cooldown(0, per, type) return func return decorator def humanbytes(B): \"Return the", "disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium: bool = False): def 
decorator(func):", "func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per,", "float(KB ** 4) # 1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B, \"Bytes\"", "MB\".format(B / MB) elif GB <= B < TB: return \"{0:.2f} GB\".format(B /", "float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if", "from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium: bool = False): def", "elif MB <= B < GB: return \"{0:.2f} MB\".format(B / MB) elif GB", "/ KB) elif MB <= B < GB: return \"{0:.2f} MB\".format(B / MB)", "{1}\".format(B, \"Bytes\" if 0 == B > 1 else \"Byte\") elif KB <=", "> 1 else \"Byte\") elif KB <= B < MB: return \"{0:.2f} KB\".format(B", "/ MB) elif GB <= B < TB: return \"{0:.2f} GB\".format(B / GB)", "1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4)", "\"Bytes\" if 0 == B > 1 else \"Byte\") elif KB <= B", "\"{0:.2f} GB\".format(B / GB) elif TB <= B: return \"{0:.2f} TB\".format(B / TB)", "B < GB: return \"{0:.2f} MB\".format(B / MB) elif GB <= B <", "CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium: bool = False):", "TB: return \"{0:.2f} GB\".format(B / GB) elif TB <= B: return \"{0:.2f} TB\".format(B", "disnake.ext.commands.cooldowns import BucketType, Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default,", "2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB = float(KB", "B < MB: return \"{0:.2f} KB\".format(B / KB) elif MB <= B <", "float(1024) MB = float(KB ** 2) # 1,048,576 GB = float(KB ** 3)", "import BucketType, Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium:", "\"Return the given bytes as a human friendly KB, MB, GB, or TB", "type) return func return decorator def humanbytes(B): \"Return the 
given bytes as a", "MB) elif GB <= B < TB: return \"{0:.2f} GB\".format(B / GB) elif", "else: func.__commands_cooldown__ = Cooldown(0, per, type) return func return decorator def humanbytes(B): \"Return", "= Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0, per, type) return func return", "1 else \"Byte\") elif KB <= B < MB: return \"{0:.2f} KB\".format(B /", "= float(KB ** 4) # 1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B,", "\"{0:.2f} MB\".format(B / MB) elif GB <= B < TB: return \"{0:.2f} GB\".format(B", "B = float(B) KB = float(1024) MB = float(KB ** 2) # 1,048,576", "premium: bool = False): def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per,", "\"Byte\") elif KB <= B < MB: return \"{0:.2f} KB\".format(B / KB) elif", "Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate,", "= float(KB ** 2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824", "MB: return \"{0:.2f} KB\".format(B / KB) elif MB <= B < GB: return", "Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0, per, type) return func return decorator", "return decorator def humanbytes(B): \"Return the given bytes as a human friendly KB,", "return \"{0:.2f} MB\".format(B / MB) elif GB <= B < TB: return \"{0:.2f}", "GB <= B < TB: return \"{0:.2f} GB\".format(B / GB) elif TB <=", "else \"Byte\") elif KB <= B < MB: return \"{0:.2f} KB\".format(B / KB)", "== B > 1 else \"Byte\") elif KB <= B < MB: return", "= CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type)", "decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not premium:", "friendly KB, MB, GB, or TB string\" B = float(B) KB = float(1024)", "GB, or TB string\" B = float(B) KB = float(1024) MB = float(KB", "type=BucketType.default, premium: bool = 
False): def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate,", "KB = float(1024) MB = float(KB ** 2) # 1,048,576 GB = float(KB", "KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 == B > 1 else \"Byte\")", "cooldoown(rate, per, type=BucketType.default, premium: bool = False): def decorator(func): if isinstance(func, Command): func._buckets", "def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not", "from disnake.ext.commands.cooldowns import BucketType, Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per,", "humanbytes(B): \"Return the given bytes as a human friendly KB, MB, GB, or", "= float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776", "B > 1 else \"Byte\") elif KB <= B < MB: return \"{0:.2f}", "as a human friendly KB, MB, GB, or TB string\" B = float(B)", "if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__", "False): def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if", "GB: return \"{0:.2f} MB\".format(B / MB) elif GB <= B < TB: return", "TB string\" B = float(B) KB = float(1024) MB = float(KB ** 2)", "func return decorator def humanbytes(B): \"Return the given bytes as a human friendly", "human friendly KB, MB, GB, or TB string\" B = float(B) KB =", "string\" B = float(B) KB = float(1024) MB = float(KB ** 2) #", "B < KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 == B > 1", "KB) elif MB <= B < GB: return \"{0:.2f} MB\".format(B / MB) elif", "if 0 == B > 1 else \"Byte\") elif KB <= B <", "per, type=BucketType.default, premium: bool = False): def decorator(func): if isinstance(func, Command): func._buckets =", "not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0, per, type)", 
"\"{0:.2f} KB\".format(B / KB) elif MB <= B < GB: return \"{0:.2f} MB\".format(B", "# 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B < KB:", "elif GB <= B < TB: return \"{0:.2f} GB\".format(B / GB) elif TB", "elif KB <= B < MB: return \"{0:.2f} KB\".format(B / KB) elif MB", "if B < KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 == B >", "Command def cooldoown(rate, per, type=BucketType.default, premium: bool = False): def decorator(func): if isinstance(func,", "<= B < GB: return \"{0:.2f} MB\".format(B / MB) elif GB <= B", "<= B < TB: return \"{0:.2f} GB\".format(B / GB) elif TB <= B:", "0 == B > 1 else \"Byte\") elif KB <= B < MB:", "1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 == B", "MB = float(KB ** 2) # 1,048,576 GB = float(KB ** 3) #", "per, type) return func return decorator def humanbytes(B): \"Return the given bytes as", "\"{0} {1}\".format(B, \"Bytes\" if 0 == B > 1 else \"Byte\") elif KB", "MB, GB, or TB string\" B = float(B) KB = float(1024) MB =", "isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__ =", "** 4) # 1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B, \"Bytes\" if", "KB\".format(B / KB) elif MB <= B < GB: return \"{0:.2f} MB\".format(B /", "= False): def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type)) else:", "< GB: return \"{0:.2f} MB\".format(B / MB) elif GB <= B < TB:", "per, type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__", "def humanbytes(B): \"Return the given bytes as a human friendly KB, MB, GB,", "given bytes as a human friendly KB, MB, GB, or TB string\" B", "# 1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B, \"Bytes\" if 0 ==", "1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: return", "the given bytes as a human friendly KB, MB, GB, or TB string\"", 
"** 3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B", "= float(B) KB = float(1024) MB = float(KB ** 2) # 1,048,576 GB", "type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__ =", "<= B < MB: return \"{0:.2f} KB\".format(B / KB) elif MB <= B", "KB, MB, GB, or TB string\" B = float(B) KB = float(1024) MB", "KB <= B < MB: return \"{0:.2f} KB\".format(B / KB) elif MB <=", "return func return decorator def humanbytes(B): \"Return the given bytes as a human", "B < TB: return \"{0:.2f} GB\".format(B / GB) elif TB <= B: return", "3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B <", "CooldownMapping(Cooldown(rate, per, type)) else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else:", "type) else: func.__commands_cooldown__ = Cooldown(0, per, type) return func return decorator def humanbytes(B):", "4) # 1,099,511,627,776 if B < KB: return \"{0} {1}\".format(B, \"Bytes\" if 0", "TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: return \"{0}", "# 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB = float(KB **", "BucketType, Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium: bool", "bool = False): def decorator(func): if isinstance(func, Command): func._buckets = CooldownMapping(Cooldown(rate, per, type))", "Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate, per, type=BucketType.default, premium: bool =", "else: if not premium: func.__commands_cooldown__ = Cooldown(rate, per, type) else: func.__commands_cooldown__ = Cooldown(0,", "decorator def humanbytes(B): \"Return the given bytes as a human friendly KB, MB,", "<filename>bot/utils/utils.py from disnake.ext.commands.cooldowns import BucketType, Cooldown, CooldownMapping from disnake.ext.commands import Command def cooldoown(rate,", "premium: func.__commands_cooldown__ = Cooldown(rate, per, type) 
else: func.__commands_cooldown__ = Cooldown(0, per, type) return", "bytes as a human friendly KB, MB, GB, or TB string\" B =" ]
[ "= dataset.overviews(1) # list of overviews from biggest to smallest oview = oviews[-1]", "at the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is using a", "= random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr)", "from matplotlib import pyplot as plt from ibm_botocore.client import Config, ClientError import rasterio", "as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item)", "endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j in", "plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little version", "cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as", "plt.show() def plot_results(bucket, results): \"\"\" Plot an array of COS from IBM Cloud", "j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an array of", "random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks from", "\"\"\" Plot an array of COS from IBM Cloud \"\"\" size = len(results)", "Cloud \"\"\" size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\",", "= oviews[-1] # let's look at the smallest thumbnail print('Decimation factor= {}'.format(oview)) #", "ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks from IBM COS", "little version of the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews =", "in range(0,num): ij, window = random.choice(list(src.block_windows())) arr = 
src.read(1, window=window) plt.subplot(1 + (num-1)/2,", "bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" )", "+ (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot", "\"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj", "size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\"", "ClientError import rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot", "j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 +", "import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks from IBM", "print('Decimation factor= {}'.format(oview)) # NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail", "plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks from IBM COS item located", "with rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2,", "fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i =", "import Config, ClientError import rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item, num):", "Plot the a little version of the map (thumbnail) \"\"\" with 
rasterio.open(tiff_url) as", "thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html)", "for item in results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr", "import pyplot as plt from ibm_botocore.client import Config, ClientError import rasterio import random", ") obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j in range(0,num):", "(size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\"", "plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little version of the map (thumbnail)", "= dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array type: ', type(thumbnail))", "arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show()", "the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list", "(http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array type:", "tiff_overview(tiff_url): \"\"\" Plot the a little version of the map (thumbnail) \"\"\" with", "the a little version of the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset:", "rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i)", "4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15))", ") i = 1 for item in results: obj = cos.Object(bucket, item) with", "factor= {}'.format(oview)) # NOTE this is using a 'decimated read' 
(http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail =", "plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15)) plt.imshow(image) plt.colorbar(shrink=0.5) plt.title(title) plt.xlabel(x_label)", "1 for item in results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src:", "figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in", "ibm_botocore.client import Config, ClientError import rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item,", "oviews = dataset.overviews(1) # list of overviews from biggest to smallest oview =", "cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 +", "oviews[-1] # let's look at the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE", "= cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1", "in results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr = src.read(1,", "+= 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little version of the", "matplotlib import pyplot as plt from ibm_botocore.client import Config, ClientError import rasterio import", "src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr)", "COS from IBM Cloud \"\"\" size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30))", "version of the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1)", "plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), 
endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with", "located at bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"),", "(num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an", "as plt from ibm_botocore.client import Config, ClientError import rasterio import random import ibm_boto3", "smallest oview = oviews[-1] # let's look at the smallest thumbnail print('Decimation factor=", "smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is using a 'decimated read'", "window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results):", "+ (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url):", "import rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num", "the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is using a 'decimated", "plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title,", "i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little version of", "i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the", "'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview)))", "using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width", "type(thumbnail)) 
plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row", "from IBM Cloud \"\"\" size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos", "def plot_results(bucket, results): \"\"\" Plot an array of COS from IBM Cloud \"\"\"", "src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show()", "oview = oviews[-1] # let's look at the smallest thumbnail print('Decimation factor= {}'.format(oview))", "= len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" )", "overviews from biggest to smallest oview = oviews[-1] # let's look at the", "results): \"\"\" Plot an array of COS from IBM Cloud \"\"\" size =", "src: for j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr = src.read(1, window=window)", "oview), int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview", "axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1", "out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1", "# list of overviews from biggest to smallest oview = oviews[-1] # let's", "plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def", "list of overviews from biggest to smallest oview = oviews[-1] # let's look", "item, num): \"\"\" Plot num random blocks from IBM COS item located at", "COS item located at bucket 
\"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos =", "= src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i", "= plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item)", "a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width //", "plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\"", "blocks from IBM COS item located at bucket \"\"\" fig, axs = plt.subplots(num,", "item) with rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij, window = random.choice(list(src.block_windows()))", "figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body'])", "endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in results: obj = cos.Object(bucket, item)", "pyplot as plt from ibm_botocore.client import Config, ClientError import rasterio import random import", "(thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list of overviews", "this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height //", "// oview), int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar()", "num): \"\"\" Plot num random blocks from IBM COS item located at bucket", "= 1 for item in 
results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as", "- Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"):", "plt from ibm_botocore.client import Config, ClientError import rasterio import random import ibm_boto3 def", "\"\"\" Plot num random blocks from IBM COS item located at bucket \"\"\"", "int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview -", "def tiff_overview(tiff_url): \"\"\" Plot the a little version of the map (thumbnail) \"\"\"", "#') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15)) plt.imshow(image) plt.colorbar(shrink=0.5) plt.title(title)", "rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list of overviews from biggest to", "rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num random", "thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array type: ',", "plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #')", "int(dataset.height // oview), int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail)", "ij, window = random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1)", "= ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in results: obj", "num random blocks from IBM COS item located at bucket \"\"\" fig, axs", "rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr =", 
"= src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def", "len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i", "of overviews from biggest to smallest oview = oviews[-1] # let's look at", "of the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) #", "from biggest to smallest oview = oviews[-1] # let's look at the smallest", "plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15)) plt.imshow(image) plt.colorbar(shrink=0.5)", "#') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15)) plt.imshow(image) plt.colorbar(shrink=0.5) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label)", "print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape))", "plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little", "= cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij, window", "type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column", "plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image,", "from ibm_botocore.client import Config, ClientError import rasterio import random import ibm_boto3 def plot_random_blocks(bucket,", "dataset.overviews(1) # list of overviews from biggest to smallest oview = oviews[-1] #", "plt.subplots(len(results), figsize=(20,30)) cos = 
ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item", "Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10,", "IBM COS item located at bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos", "read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array", "ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for", "fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj =", "window = random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item)", "plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a", "src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i +=", "map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list of", "# let's look at the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this", "as dataset: oviews = dataset.overviews(1) # list of overviews from biggest to smallest", "from IBM COS item located at bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30))", "to smallest oview = oviews[-1] # let's look at the smallest thumbnail print('Decimation", "Plot num random blocks from IBM COS 
item located at bucket \"\"\" fig,", "NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height", "at bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\"", "# NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1,", "for j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1", "as src: for j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr = src.read(1,", "with rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij, window = random.choice(list(src.block_windows())) arr", "biggest to smallest oview = oviews[-1] # let's look at the smallest thumbnail", "array of COS from IBM Cloud \"\"\" size = len(results) fig, axs =", "\"\"\" with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list of overviews from", "of COS from IBM Cloud \"\"\" size = len(results) fig, axs = plt.subplots(len(results),", "def plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks from IBM COS item", "', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #')", "an array of COS from IBM Cloud \"\"\" size = len(results) fig, axs", "cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in results:", "i = 1 for item in results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body'])", "random blocks from IBM COS item located at bucket \"\"\" fig, axs =", "item) with 
rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2,", "2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) i += 1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot", "1 plt.show() def tiff_overview(tiff_url): \"\"\" Plot the a little version of the map", "plot_results(bucket, results): \"\"\" Plot an array of COS from IBM Cloud \"\"\" size", "is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1, out_shape=(1, int(dataset.height // oview),", "= plt.subplots(len(results), figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for", "ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in results: obj =", "IBM Cloud \"\"\" size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos =", "plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an array of COS from", "plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an array of COS from IBM", "with rasterio.open(tiff_url) as dataset: oviews = dataset.overviews(1) # list of overviews from biggest", "{}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\", y_label=\"\"): plt.figure(figsize=(10, 15)) plt.imshow(image)", "range(0,num): ij, window = random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2,", "plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an array of COS", "\"\"\" size = len(results) fig, axs = plt.subplots(len(results), figsize=(20,30)) cos = 
ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"),", "axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket,", "Config, ClientError import rasterio import random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\"", "obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij,", "5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def", "config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j", "arr = src.read(1, out_shape=(src.height//10, src.width//10)) plt.subplot(1 + (size-1)/2, 2, i) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5)", "item located at bucket \"\"\" fig, axs = plt.subplots(num, figsize=(20,30)) cos = ibm_boto3.resource(\"s3\",", "src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket,", "cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: for j in range(0,num): ij, window =", "oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band 4", "Plot an array of COS from IBM Cloud \"\"\" size = len(results) fig,", "= ibm_boto3.resource(\"s3\", config=Config(signature_version=\"oauth\"), endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src:", "\"\"\" Plot the a little version of the map (thumbnail) \"\"\" with rasterio.open(tiff_url)", "results: obj = cos.Object(bucket, 
item) with rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10,", "let's look at the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is", "plt.title('Overview - Band 4 {}'.format(thumbnail.shape)) plt.xlabel('Column #') plt.ylabel('Row #') def plot_map(image, title, x_label=\"\",", "dataset.read(1, out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5,", "out_shape=(1, int(dataset.height // oview), int(dataset.width // oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5))", "// oview))) print('array type: ', type(thumbnail)) plt.figure(figsize=(5, 5)) plt.imshow(thumbnail) plt.colorbar() plt.title('Overview - Band", "2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5) plt.show() def plot_results(bucket, results): \"\"\" Plot an array", "a little version of the map (thumbnail) \"\"\" with rasterio.open(tiff_url) as dataset: oviews", "random.choice(list(src.block_windows())) arr = src.read(1, window=window) plt.subplot(1 + (num-1)/2, 2, j+1) plt.gca().set_title(item) plt.imshow(arr) plt.colorbar(shrink=0.5)", "dataset: oviews = dataset.overviews(1) # list of overviews from biggest to smallest oview", "obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr = src.read(1, out_shape=(src.height//10, src.width//10))", "item in results: obj = cos.Object(bucket, item) with rasterio.open(obj.get()['Body']) as src: arr =", "{}'.format(oview)) # NOTE this is using a 'decimated read' (http://rasterio.readthedocs.io/en/latest/topics/resampling.html) thumbnail = dataset.read(1,", "look at the smallest thumbnail print('Decimation factor= {}'.format(oview)) # NOTE this is using", "import random import ibm_boto3 def plot_random_blocks(bucket, item, num): \"\"\" Plot num random blocks", "config=Config(signature_version=\"oauth\"), 
endpoint_url=\"https://s3.eu-de.cloud-object-storage.appdomain.cloud\" ) i = 1 for item in results: obj = cos.Object(bucket," ]
[ "'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that", "column doubles the total mass MD2 = 2.0 * MD def test_dmass(): for", "sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <=", "len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens, test.a) assert", "all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist)", "test_dmass(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc,", "material density halves the total number RHO2 = 2.0 * RHO def test_ndens():", "* mtot1) <= 0.01 # Test that doubling the dust grain material density", "mass MD2 = 2.0 * MD def test_dmass(): for ss in ALLOWED_SIZES: for", "Test that doubling the dust grain material density halves the total number RHO2", "with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties and functions", "ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO)", "pytest import numpy as np from scipy.integrate import trapz from newdust.graindist import *", "= GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size,", "in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD,", "the helper function does not run on weird strings def test_catch_exception(): ss, cc", "RHO = 3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES", "isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain):", "@pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert 
isinstance(test.a, np.ndarray) assert", "np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot", "== len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens, test.a)", "# g cm^-2 RHO = 3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT", "for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD)", "cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 =", "function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT)", "that the helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test", "percent_diff MD = 1.e-5 # g cm^-2 RHO = 3.0 # g c^-3", "cstring) assert isinstance(test, GrainDist) # Test that the helper function does not run", "test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 # Test", "doubling the dust mass column doubles the total mass MD2 = 2.0 *", "def test_ndens(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss,", "= trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert percent_diff(nd2, 0.5 * nd1) <=", "GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else:", "numpy as np from scipy.integrate import trapz from newdust.graindist import * from .", "CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert", "GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) ==", "= 2.0 * RHO def test_ndens(): for ss in ALLOWED_SIZES: for cc in", "trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) 
<= 0.01 # Test that doubling", "for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD,", "# g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS", "import pytest import numpy as np from scipy.integrate import trapz from newdust.graindist import", "from . import percent_diff MD = 1.e-5 # g cm^-2 RHO = 3.0", "cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc,", "= 1.e-5 # g cm^-2 RHO = 3.0 # g c^-3 SDEFAULT =", "md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else: nd1 =", "cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the", "GrainDist) # Test that the helper function does not run on weird strings", "GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2", "GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties and functions of GrainDist", "= test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert", "3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff']", "mtot = test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01", "cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else: nd1", "0.01 # Test that doubling the dust grain material density halves the total", "ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper function runs", "if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a)", "percent_diff(mtot2, 2.0 * mtot1) <= 0.01 # Test that doubling the dust grain", "trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert 
percent_diff(nd2, 0.5 * nd1) <= 0.01", "SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] #", "isinstance(test, GrainDist) # Test that the helper function does not run on weird", "assert percent_diff(mtot, MD) <= 0.01 # Test that doubling the dust mass column", "= test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert", "test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) #", "basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring,", "MD2 = 2.0 * MD def test_dmass(): for ss in ALLOWED_SIZES: for cc", "nd1, nd2 = test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens,", "else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 *", "= GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT,", "# Test that the helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def", "strings def test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT,", "c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite']", "density halves the total number RHO2 = 2.0 * RHO def test_ndens(): for", "test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if", "GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain):", "GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that the helper function does 
not", "'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties", "test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2,", "cc) # Test the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def", "trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 # Test that doubling the dust", "GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1", "CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the", "ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def", "test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens)", "ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2", "ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size,", "['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper function runs on all", "0.01 # Test that doubling the dust mass column doubles the total mass", "test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0", "total number RHO2 = 2.0 * RHO def test_ndens(): for ss in ALLOWED_SIZES:", "else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert percent_diff(nd2, 0.5 *", "dust mass column doubles the total mass MD2 = 2.0 * MD def", "test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens", "trapz(test1.mdens, test1.a) mtot2 = 
trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01", "= test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 #", "# Test that doubling the dust grain material density halves the total number", "GrainDist(SDEFAULT, cc) # Test the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES)", "# Test that doubling the dust mass column doubles the total mass MD2", "def test_dmass(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss,", "test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test", "mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1)", "sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 =", "weird strings def test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT)", "functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert", "GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) #", "md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens,", "percent_diff(mtot, MD) <= 0.01 # Test that doubling the dust mass column doubles", "if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot,", "# Test that the helper function does not run on weird strings def", "cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 =", "grain material density halves the total number RHO2 = 2.0 * RHO def", "<= 0.01 # 
Test that doubling the dust grain material density halves the", "from newdust.graindist import * from . import percent_diff MD = 1.e-5 # g", "scipy.integrate import trapz from newdust.graindist import * from . import percent_diff MD =", "dust grain material density halves the total number RHO2 = 2.0 * RHO", "number RHO2 = 2.0 * RHO def test_ndens(): for ss in ALLOWED_SIZES: for", "function does not run on weird strings def test_catch_exception(): ss, cc = 'foo',", "halves the total number RHO2 = 2.0 * RHO def test_ndens(): for ss", "in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2", "assert isinstance(test, GrainDist) # Test that the helper function does not run on", "sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 =", "* RHO def test_ndens(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1", "@pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS)", "= ['Drude','Silicate','Graphite'] # Test that the helper function runs on all types @pytest.mark.parametrize('sstring',", "runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert", "test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01 # Test that doubling the", "g cm^-2 RHO = 3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT =", "import * from . 
import percent_diff MD = 1.e-5 # g cm^-2 RHO", "# Test the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring):", "assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size,", "test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens,", "@pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test", "test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain):", "helper function does not run on weird strings def test_catch_exception(): ss, cc =", "CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring',", "mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01 # Test", "properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT,", "nd2 = test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a)", ". 
import percent_diff MD = 1.e-5 # g cm^-2 RHO = 3.0 #", "for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc,", "mtot1) <= 0.01 # Test that doubling the dust grain material density halves", "g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS =", "len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot", "test.a) assert percent_diff(mtot, MD) <= 0.01 # Test that doubling the dust mass", "GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring)", "def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that the", "mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a)", "MD = 1.e-5 # g cm^-2 RHO = 3.0 # g c^-3 SDEFAULT", "len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens", "assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01 # Test that doubling the dust", "test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that the helper", "MD def test_dmass(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 =", "ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss,", "RHO def test_ndens(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 =", "test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert", "2.0 * MD def test_dmass(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS:", "in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if", "ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) 
GrainDist(SDEFAULT, cc) # Test", "ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2)", "* from . import percent_diff MD = 1.e-5 # g cm^-2 RHO =", "in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 =", "for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 = GrainDist(ss,", "not run on weird strings def test_catch_exception(): ss, cc = 'foo', 'bar' with", "run on weird strings def test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError):", "'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties and", "def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) ==", "= trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01 # Test that", "assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test,", "from scipy.integrate import trapz from newdust.graindist import * from . 
import percent_diff MD", "cm^-2 RHO = 3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate'", "MD) <= 0.01 # Test that doubling the dust mass column doubles the", "Test that doubling the dust mass column doubles the total mass MD2 =", "cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2)", "and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD)", "assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot =", "import percent_diff MD = 1.e-5 # g cm^-2 RHO = 3.0 # g", "mass column doubles the total mass MD2 = 2.0 * MD def test_dmass():", "ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc, md=MD, rho=RHO) test2 =", "ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper function runs on all types", "the dust mass column doubles the total mass MD2 = 2.0 * MD", "cc, md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1,", "RHO2 = 2.0 * RHO def test_ndens(): for ss in ALLOWED_SIZES: for cc", "isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD)", "import numpy as np from scipy.integrate import trapz from newdust.graindist import * from", "of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a,", "len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else: mtot = trapz(test.mdens,", "['Drude','Silicate','Graphite'] # Test that the helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES)", "= 2.0 * MD def test_dmass(): for ss in ALLOWED_SIZES: for cc in", "<= 0.01 # Test that doubling the dust mass column doubles the total", "np from scipy.integrate import trapz from 
newdust.graindist import * from . import percent_diff", "doubling the dust grain material density halves the total number RHO2 = 2.0", "rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens,", "Test that the helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring):", "rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 =", "md=MD, rho=RHO) test2 = GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2", "types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring',", "md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens,", "2.0 * RHO def test_ndens(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS:", "that doubling the dust mass column doubles the total mass MD2 = 2.0", "on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test,", "= GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a)", "assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot =", "isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2", "does not run on weird strings def test_catch_exception(): ss, cc = 'foo', 'bar'", "= trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 # Test that doubling the", "isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2", "test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) 
@pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test =", "mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 # Test that doubling", "test1.ndens, test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert percent_diff(nd2,", "else: mtot = trapz(test.mdens, test.a) assert percent_diff(mtot, MD) <= 0.01 # Test that", "def test_sstring(sstring): test = GrainDist(sstring, CDEFAULT) assert isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring):", "Test the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test", "'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper function", "= ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper function runs on", "the basic properties and functions of GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test =", "= 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test", "= 3.0 # g c^-3 SDEFAULT = 'Powerlaw' CDEFAULT = 'Silicate' ALLOWED_SIZES =", "test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that the helper function", "= GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that the helper function does", "* MD def test_dmass(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1", "test_ndens(): for ss in ALLOWED_SIZES: for cc in ALLOWED_COMPS: test1 = GrainDist(ss, cc,", "2.0 * mtot1) <= 0.01 # Test that doubling the dust grain material", "mtot1, mtot2 = test1.mdens, test2.mdens else: mtot1 = trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens,", "if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens 
else: nd1 = trapz(test1.ndens, test1.a)", "= GrainDist(ss, cc, md=MD, rho=RHO2) if isinstance(test1.size, sizedist.Grain): nd1, nd2 = test1.ndens, test2.ndens", "helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test = GrainDist(sstring,", "isinstance(test, GrainDist) @pytest.mark.parametrize('cstring', ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist)", "def test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc)", "trapz from newdust.graindist import * from . import percent_diff MD = 1.e-5 #", "pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic properties and functions of", "GrainDist @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray)", "CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens)", "that the helper function does not run on weird strings def test_catch_exception(): ss,", "= GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1, mtot2 = test1.mdens, test2.mdens else:", "= 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss, CDEFAULT) GrainDist(SDEFAULT, cc) # Test the basic", "nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert percent_diff(nd2, 0.5 * nd1)", "1.e-5 # g cm^-2 RHO = 3.0 # g c^-3 SDEFAULT = 'Powerlaw'", "the total mass MD2 = 2.0 * MD def test_dmass(): for ss in", "total mass MD2 = 2.0 * MD def test_dmass(): for ss in ALLOWED_SIZES:", "import trapz from newdust.graindist import * from . 
import percent_diff MD = 1.e-5", "ALLOWED_COMPS) def test_cstring(cstring): test = GrainDist(SDEFAULT, cstring) assert isinstance(test, GrainDist) # Test that", "= GrainDist(ss, cc, md=MD) test2 = GrainDist(ss, cc, md=MD2) if isinstance(test1.size, sizedist.Grain): mtot1,", "test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <= 0.01 #", "ALLOWED_SIZES) def test_GrainDist(sstring): test = GrainDist(sstring, CDEFAULT, md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a)", "Test that the helper function does not run on weird strings def test_catch_exception():", "on weird strings def test_catch_exception(): ss, cc = 'foo', 'bar' with pytest.raises(AssertionError): GrainDist(ss,", "the dust grain material density halves the total number RHO2 = 2.0 *", "that doubling the dust grain material density halves the total number RHO2 =", "md=MD) assert isinstance(test.a, np.ndarray) assert len(test.a) == len(test.ndens) assert len(test.a) == len(test.mdens) if", "test2.ndens else: nd1 = trapz(test1.ndens, test1.a) nd2 = trapz(test2.ndens, test2.a) assert percent_diff(nd2, 0.5", "= trapz(test1.mdens, test1.a) mtot2 = trapz(test2.mdens, test2.a) assert percent_diff(mtot2, 2.0 * mtot1) <=", "== len(test.ndens) assert len(test.a) == len(test.mdens) if isinstance(test.size, sizedist.Grain): mtot = test.mdens else:", "doubles the total mass MD2 = 2.0 * MD def test_dmass(): for ss", "the helper function runs on all types @pytest.mark.parametrize('sstring', ALLOWED_SIZES) def test_sstring(sstring): test =", "= 'Silicate' ALLOWED_SIZES = ['Grain','Powerlaw','ExpCutoff'] ALLOWED_COMPS = ['Drude','Silicate','Graphite'] # Test that the helper", "the total number RHO2 = 2.0 * RHO def test_ndens(): for ss in", "as np from scipy.integrate import trapz from newdust.graindist import * from . import", "newdust.graindist import * from . import percent_diff MD = 1.e-5 # g cm^-2" ]
[ "# \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, #", "\"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token", "'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key =", "# \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], #", "'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def 
complete_login(self, request, app, token, response): # extra_data_token = requests.post(self.access_token_url, params={ #", "# b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data = jwt.decode( # extra_data_token,", "# \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import", "# }) # public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" #", "PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" #", "# extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data =", "complete_login(self, request, app, token, response): # extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id,", "# b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data = jwt.decode(", "allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\",", "\"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter):", "import requests import jwt from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from", "OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # 
\"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\",", "= NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self,", "b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) # #", "NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\",", "request, app, token, response): # extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id, #", "= ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" #", "= 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response): # extra_data_token = requests.post(self.access_token_url, params={", "jwt from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider", "from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # 
\"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", #", "class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url =", "extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json()", "# \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id", "# \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", #", "\"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # 
\"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\",", "# \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", #", "= 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token,", "# \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # 
\"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", #", "import jwt from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import", "\"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true,", "\"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true,", "access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app,", "# 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key", "public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\"", "# b\"-----BEGIN PUBLIC 
KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" #", "b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data =", "\"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], #", "import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", #", "\"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id =", "'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response): # extra_data_token =", "token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true}", "# b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" 
# b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC", "b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data = jwt.decode( # extra_data_token, public_key,", "profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response): # extra_data_token = requests.post(self.access_token_url,", "b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END", "( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\",", "# \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code", "# 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\"", "b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # )", "'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # })", "from allauth.socialaccount.providers.oauth2.views import ( 
OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider #", "extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token,", "OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url", "KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\"", "id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], #", "from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url", "KEY-----\" # ) # # extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\"", "# \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from", "# \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # 
\"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], #", "\"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token'", "# \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id", "token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"],", "# b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) #", "# \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"],", "# \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # 
\"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], #", "import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize'", "extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json() ) oauth2_login = OAuth2LoginView.adapter_view(NoaOAuth2Adapter) oauth2_callback =", "'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( # b\"-----BEGIN", "requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri':", "# extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response( request,", "# \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, #", "authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response): #", "# # extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data", "# ) extra_data = {} return self.get_provider().sociallogin_from_response( request, 
extra_data.json() ) oauth2_login = OAuth2LoginView.adapter_view(NoaOAuth2Adapter)", "\"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client", "\"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code", ") extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json() ) oauth2_login = OAuth2LoginView.adapter_view(NoaOAuth2Adapter) oauth2_callback", "\"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # 
\"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], #", "NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo'", "\"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true,", "requests import jwt from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider", "\"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\",", "# 
\"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, #", "# b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" #", "# \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code", "# \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # 
\"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"],", "NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request,", "# \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", #", "PUBLIC KEY-----\" # ) # # extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\",", "\"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"],", "# b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data", "}) # public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # 
b\"<KEY>\" # b\"<KEY>\"", "\"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\",", "\"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"],", "'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = (", "\"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\", # \"revocation_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/revocation\", # \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # 
\"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true,", "\"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token", "# \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"],", "# extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code':", "b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\"", "audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json() ) oauth2_login", "OAuth2LoginView, ) from .provider import NoaProvider # 
{\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", #", "# \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class", "b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" # ) # # extra_data = jwt.decode( #", "jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response(", "# public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" #", "<filename>allauth/socialaccount/providers/noa/views.py import requests import jwt from allauth.socialaccount.providers.oauth2.views import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, )", "# b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\" #", "# \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url =", ") from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\",", "= requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, #", ".provider import 
NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\",", "token, response): # extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code',", "= jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return", "{\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", # \"check_session_iframe\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/checksession\",", "# ) # # extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" #", "\"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error", "response): # extra_data_token = requests.post(self.access_token_url, 
params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', #", "import ( OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", #", "# 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', #", "( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\"", "token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"],", "'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response):", "extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {}", "= {} return self.get_provider().sociallogin_from_response( request, extra_data.json() ) oauth2_login = OAuth2LoginView.adapter_view(NoaOAuth2Adapter) oauth2_callback = OAuth2CallbackView.adapter_view(NoaOAuth2Adapter)", "app, token, response): # extra_data_token = requests.post(self.access_token_url, params={ # 'client_id': app.client_id, # 'grant_type':", "b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" # b\"\\n-----END PUBLIC KEY-----\"", "b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\"", 
"\"code_challenge_methods_supported\":[\"plain\",\"S256\"], # \"request_parameter_supported\":true} from ..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url", "\"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # \"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, # \"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"],", "params={ # 'client_id': app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/',", "'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" #", "\"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], #", "'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( # b\"-----BEGIN PUBLIC KEY-----\\n\" # b\"<KEY>\"", "app.client_id, # 'grant_type': 'authorization_code', # 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) #", "public_key, audience=\"api\", algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json() )", "def complete_login(self, request, app, token, 
response): # extra_data_token = requests.post(self.access_token_url, params={ # 'client_id':", "# b\"<KEY>\" # b\"<KEY>\" # b\"<KEY>\" # b\"cfnCXzIwLqkFhBWk941IaRVvy5xp4wXcch45T6pYKCkBF2pj6mreMKExg1uMYY1n\" # b\"<KEY>\" # b\"IwIDAQAB\" #", "OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView, ) from .provider import NoaProvider # {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", #", "..oauth2.client import OAuth2Error class NoaOAuth2Adapter(OAuth2Adapter): provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url =", "# \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code token\",\"code id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"],", "\"scopes_supported\":[\"profile\",\"openid\",\"email\",\"api\",\"offline_access\"], # \"claims_supported\":[\"name\",\"family_name\",\"given_name\",\"middle_name\",\"nickname\",\"preferred_username\",\"profile\",\"picture\",\"website\",\"gender\",\"birthdate\",\"zoneinfo\",\"locale\",\"updated_at\",\"sub\",\"email\",\"email_verified\"], # \"grant_types_supported\":[\"authorization_code\",\"client_credentials\",\"refresh_token\",\"implicit\",\"password\",\"urn:ietf:params:oauth:grant-type:device_code\"], # \"response_types_supported\":[\"code\",\"token\",\"id_token\",\"id_token token\",\"code id_token\",\"code 
token\",\"code id_token token\"], #", "# 'code': token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( #", "token.token, # 'redirect_uri': 'https://noahow.com/accounts/noa/login/callback/', # }) # public_key = ( # b\"-----BEGIN PUBLIC", "provider_id = NoaProvider.id access_token_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/token' authorize_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def", "algorithms=\"RS256\" # ) extra_data = {} return self.get_provider().sociallogin_from_response( request, extra_data.json() ) oauth2_login =", "= 'https://noaidentitydev.azurewebsites.net/authorization/connect/authorize' profile_url = 'https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo' def complete_login(self, request, app, token, response): # extra_data_token", ") # # extra_data = jwt.decode( # extra_data_token, public_key, audience=\"api\", algorithms=\"RS256\" # )", "# {\"issuer\":\"https://noaidentitydev.azurewebsites.net/authorization\", # \"jwks_uri\":\"https://noaidentitydev.azurewebsites.net/authorization/.well-known/openid-configuration/jwks\", # \"authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/authorize\", # \"token_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/token\", # \"userinfo_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/userinfo\", # \"end_session_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/endsession\", #", "# \"introspection_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/introspect\", # \"device_authorization_endpoint\":\"https://noaidentitydev.azurewebsites.net/authorization/connect/deviceauthorization\", # \"frontchannel_logout_supported\":true, # 
\"frontchannel_logout_session_supported\":true, # \"backchannel_logout_supported\":true, # \"backchannel_logout_session_supported\":true, #", "id_token token\"], # \"response_modes_supported\":[\"form_post\",\"query\",\"fragment\"], # \"token_endpoint_auth_methods_supported\":[\"client_secret_basic\",\"client_secret_post\"], # \"id_token_signing_alg_values_supported\":[\"RS256\"], # \"subject_types_supported\":[\"public\"], # \"code_challenge_methods_supported\":[\"plain\",\"S256\"], #" ]
[ "-> str : stack = Stack() while num > 0: remender = num", "from Stack import Stack def intToBinary(num: int) -> str : stack = Stack()", "a Number: \")) if num < 0: print(\"Enter a Positive Number\") quit() result", "binary += str(stack.pop()) return binary num = int(input(\"Enter a Number: \")) if num", "Number: \")) if num < 0: print(\"Enter a Positive Number\") quit() result =", "num // 2 binary = \"\" while not stack.is_empty(): binary += str(stack.pop()) return", "import Stack def intToBinary(num: int) -> str : stack = Stack() while num", "Stack import Stack def intToBinary(num: int) -> str : stack = Stack() while", "Stack.py from Stack import Stack def intToBinary(num: int) -> str : stack =", "= Stack() while num > 0: remender = num % 2 stack.push(remender) num", "num < 0: print(\"Enter a Positive Number\") quit() result = intToBinary(num) print(\"Binary: \",result)", "int(input(\"Enter a Number: \")) if num < 0: print(\"Enter a Positive Number\") quit()", "str(stack.pop()) return binary num = int(input(\"Enter a Number: \")) if num < 0:", "num = num // 2 binary = \"\" while not stack.is_empty(): binary +=", "0: remender = num % 2 stack.push(remender) num = num // 2 binary", "Stack def intToBinary(num: int) -> str : stack = Stack() while num >", "stack.push(remender) num = num // 2 binary = \"\" while not stack.is_empty(): binary", "binary = \"\" while not stack.is_empty(): binary += str(stack.pop()) return binary num =", "\"\" while not stack.is_empty(): binary += str(stack.pop()) return binary num = int(input(\"Enter a", "= num // 2 binary = \"\" while not stack.is_empty(): binary += str(stack.pop())", "stack = Stack() while num > 0: remender = num % 2 stack.push(remender)", "\")) if num < 0: print(\"Enter a Positive Number\") quit() result = intToBinary(num)", "+= str(stack.pop()) return binary num = int(input(\"Enter a Number: \")) if num <", "2 stack.push(remender) num = num // 2 binary = \"\" while not stack.is_empty():", "= num % 2 
stack.push(remender) num = num // 2 binary = \"\"", "> 0: remender = num % 2 stack.push(remender) num = num // 2", "num % 2 stack.push(remender) num = num // 2 binary = \"\" while", "the Stack.py from Stack import Stack def intToBinary(num: int) -> str : stack", "binary num = int(input(\"Enter a Number: \")) if num < 0: print(\"Enter a", "stack.is_empty(): binary += str(stack.pop()) return binary num = int(input(\"Enter a Number: \")) if", "while not stack.is_empty(): binary += str(stack.pop()) return binary num = int(input(\"Enter a Number:", ": stack = Stack() while num > 0: remender = num % 2", "% 2 stack.push(remender) num = num // 2 binary = \"\" while not", "return binary num = int(input(\"Enter a Number: \")) if num < 0: print(\"Enter", "int) -> str : stack = Stack() while num > 0: remender =", "if num < 0: print(\"Enter a Positive Number\") quit() result = intToBinary(num) print(\"Binary:", "str : stack = Stack() while num > 0: remender = num %", "intToBinary(num: int) -> str : stack = Stack() while num > 0: remender", "not stack.is_empty(): binary += str(stack.pop()) return binary num = int(input(\"Enter a Number: \"))", "// 2 binary = \"\" while not stack.is_empty(): binary += str(stack.pop()) return binary", "num > 0: remender = num % 2 stack.push(remender) num = num //", "num = int(input(\"Enter a Number: \")) if num < 0: print(\"Enter a Positive", "while num > 0: remender = num % 2 stack.push(remender) num = num", "= int(input(\"Enter a Number: \")) if num < 0: print(\"Enter a Positive Number\")", "#imported the Stack.py from Stack import Stack def intToBinary(num: int) -> str :", "= \"\" while not stack.is_empty(): binary += str(stack.pop()) return binary num = int(input(\"Enter", "Stack() while num > 0: remender = num % 2 stack.push(remender) num =", "remender = num % 2 stack.push(remender) num = num // 2 binary =", "def intToBinary(num: int) -> str : stack = Stack() while num > 0:", "2 binary = \"\" while not stack.is_empty(): binary += 
str(stack.pop()) return binary num" ]
[ "# -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2)", "False ################################################## xx1 = int(x1) xx3 = int(x3) xx4 = int(x4) xx5 =", "xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2),", "xx2) # x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # -> str", "int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) #", "str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 =", "= str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3),", "= 1.5 x2 = 3 x3 = \"123\" x4 = True x5 =", "float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) #", "= float(x3) print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2", "int(x1) xx3 = int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4),", "print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) # x3 =", "\"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1),", "print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) # x3", "# x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # -> str xxxx1", "= \"123\" x4 = True x5 = False ################################################## xx1 = int(x1) xx3", "x2 = 3 x3 = \"123\" x4 = True x5 = False ##################################################", "= float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3)", "xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2", "= int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) 
print(type(xx4), xx4) print(type(xx5), xx5) xx2 =", "= int(x1) xx3 = int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3))", "print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) # x3 = \"123\"", "xx5) xx2 = float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3 = float(x3)", "= \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\")", "xx3 = int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4)", "xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3", "1.5 x2 = 3 x3 = \"123\" x4 = True x5 = False", "################################################## xx1 = int(x1) xx3 = int(x3) xx4 = int(x4) xx5 = int(x5)", "x1 = 1.5 x2 = 3 x3 = \"123\" x4 = True x5", "x5 = False ################################################## xx1 = int(x1) xx3 = int(x3) xx4 = int(x4)", "x3 = \"123\" x4 = True x5 = False ################################################## xx1 = int(x1)", "xx1 = int(x1) xx3 = int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1))", "xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 = \"123\" x1 = \"234\" print(x1)", "\"123\" x4 = True x5 = False ################################################## xx1 = int(x1) xx3 =", "x4 = True x5 = False ################################################## xx1 = int(x1) xx3 = int(x3)", "= False ################################################## xx1 = int(x1) xx3 = int(x3) xx4 = int(x4) xx5", "print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123)", "-> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3", "print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1", "print(type(xx2), xx2) # x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # ->", "= int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) 
print(type(xx5),", "x3 = \"123\" xxx3 = float(x3) print(type(xxx3), xxx3) # -> str xxxx1 =", "xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23)", "float(x3) print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 =", "int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2)", "int(x3) xx4 = int(x4) xx5 = int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5)", "3 x3 = \"123\" x4 = True x5 = False ################################################## xx1 =", "xxx3 = float(x3) print(type(xxx3), xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1)", "str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 = \"123\" x1 =", "xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 = \"123\"", "print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 = \"123\" x1 = \"234\"", "xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 =", "= True x5 = False ################################################## xx1 = int(x1) xx3 = int(x3) xx4", "xxx3) # -> str xxxx1 = str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2),", "print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3 =", "= str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3) x1 = \"123\" x1", "str(\"123\") print(type(xxxx1), xxxx1) xxxx2 = str(123) print(type(xxxx2), xxxx2) xxxx3 = str(1.23) print(type(xxxx3), xxxx3)", "xx2 = float(x2) print(type(xx2), xx2) # x3 = \"123\" xxx3 = float(x3) print(type(xxx3),", "True x5 = False ################################################## xx1 = int(x1) xx3 = int(x3) xx4 =", "= int(x5) print(type(xx1)) print(type(xx3)) print(type(xx4), xx4) print(type(xx5), xx5) xx2 = float(x2) print(type(xx2), xx2)", "= 3 x3 = 
\"123\" x4 = True x5 = False ################################################## xx1" ]
[ "super().__init__(message) def handler(event, context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table']", "function can fail when event['write_id_index_status']['retries_left'] == 0. \"\"\" def __init__(self, message): super().__init__(message) def", "function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException',", "'retries_left': int # How many retries left in case of an error. #", "ensure the step function goes to its catch handling step. The error information", "to write the morton index of a cuboid object key to the id", "the step function that calls this lambda. Since this lambda controls retries via", "the step function should proceed to its catch handling when it receives one", "event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex: # Probably had", "DynamoClientError to ensure the step function goes to its catch handling step. The", "= False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter backoff", "# Lambda to write the morton index of a cuboid object key to", "bossutils.aws import get_region import json import random from spdb.spatialdb.object_indices import ObjectIndices from time", "as a type. Returns: (str|None): Just the name of the class or None.", "in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as", "How many retries left in case of an error. 
# } # }", "cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done']", "backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get from", "'version': '...', # 'write_id_index_status': { # 'done': False, # 'delay': 0, # 'retries_left':", "'...', # 'version': '...', # 'write_id_index_status': { # 'done': False, # 'delay': 0,", "= round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_): \"\"\" Get just the class", "step function should proceed to its catch handling when it receives one of", "information available to the step function isn't as useful when wrapped, so the", "{ # 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket': ...,", "= 5 \"\"\" These derived exceptions of botocore.exceptions.ClientError will be not be retried", "= ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index(", "id in the # DynamoDB id index table. # # If there are", "retried by the step function that calls this lambda. Since this lambda controls", "= (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id", "= event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status =", "catch handling step. The error information available to the step function isn't as", "just the class name (w/o module(s) from the type. 
Args: type_ (type): Class", "the expected errors are enumerated below and in the step function's retry statement.", "botocore from bossutils.aws import get_region import json import random from spdb.spatialdb.object_indices import ObjectIndices", "Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1)", "'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the step", "if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from", "'s3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ..., # 'cuboid_object_key':", "DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the step function can fail when", "this lambda controls retries via event['write_id_index_status']['retries_left'] the step function should proceed to its", "Since this lambda controls retries via event['write_id_index_status']['retries_left'] the step function should proceed to", "case of an error. # } # } import botocore from bossutils.aws import", "to retry. Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left']", "index table. # # If there are failures, uses decorrelatd jitter backoff algorithm", "derived exceptions of botocore.exceptions.ClientError will be not be retried by the step function", "# 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket': ..., #", "handling when it receives one of these exceptions. 
Derived exceptions of ClientError that", "= event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status = event['write_id_index_status'] id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind =", "morton index of a cuboid object key to the id in the #", "that are not part of this list get wrapped in DynamoClientError to ensure", "Just the name of the class or None. \"\"\" try: return str(type_).rsplit('.', 1)[1].rstrip(\"'>\")", "jitter backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get", "dictionary # { # 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table': ..., #", "last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_): \"\"\"", "function goes to its catch handling step. The error information available to the", "the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError',", "step function isn't as useful when wrapped, so the expected errors are enumerated", "described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get from events dictionary", "list get wrapped in DynamoClientError to ensure the step function goes to its", "knows to retry. Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] = False", "in case of an error. # } # } import botocore from bossutils.aws", "function should proceed to its catch handling when it receives one of these", "calls this lambda. 
Since this lambda controls retries via event['write_id_index_status']['retries_left'] the step function", "<reponame>jhuapl-boss/boss-tools # Lambda to write the morton index of a cuboid object key", "from bossutils.aws import get_region import json import random from spdb.spatialdb.object_indices import ObjectIndices from", "# Prepare decorrelated jitter backoff delay. last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS:", "so the step function can fail when event['write_id_index_status']['retries_left'] == 0. \"\"\" def __init__(self,", "These derived exceptions of botocore.exceptions.ClientError will be not be retried by the step", "to its catch handling when it receives one of these exceptions. Derived exceptions", "from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return event def prep_for_retry(write_id_index_status): \"\"\" Update the", "the class name (w/o module(s) from the type. Args: type_ (type): Class as", "It expects to get from events dictionary # { # 'id_index_table': ..., #", "type. Args: type_ (type): Class as a type. Returns: (str|None): Just the name", "get wrapped in DynamoClientError to ensure the step function goes to its catch", "that calls this lambda. Since this lambda controls retries via event['write_id_index_status']['retries_left'] the step", "DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result'] =", "BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_): \"\"\" Get just", "a type. Returns: (str|None): Just the name of the class or None. \"\"\"", "write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter backoff delay. last_delay", "module(s) from the type. Args: type_ (type): Class as a type. 
Returns: (str|None):", "# 'id_group': '...', # 'version': '...', # 'write_id_index_status': { # 'done': False, #", "= [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException',", "get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex", "'...', # 'id_group': '...', # 'version': '...', # 'write_id_index_status': { # 'done': False,", "exceptions of botocore.exceptions.ClientError will be not be retried by the step function that", "receives one of these exceptions. Derived exceptions of ClientError that are not part", "exceptions. Derived exceptions of ClientError that are not part of this list get", "Get just the class name (w/o module(s) from the type. Args: type_ (type):", "DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return event def prep_for_retry(write_id_index_status): \"\"\" Update", "in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get from events dictionary #", "handling step. The error information available to the step function isn't as useful", "step function goes to its catch handling step. The error information available to", "statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException',", "delay. 
last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] =", "decorrelated jitter backoff delay. last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay =", "as useful when wrapped, so the expected errors are enumerated below and in", "isn't as useful when wrapped, so the expected errors are enumerated below and", "to the id in the # DynamoDB id index table. # # If", "event['write_id_index_status']['retries_left'] the step function should proceed to its catch handling when it receives", "# If there are failures, uses decorrelatd jitter backoff algorithm described in: #", "from the type. Args: type_ (type): Class as a type. Returns: (str|None): Just", "ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived exceptions of", "step function can fail when event['write_id_index_status']['retries_left'] == 0. \"\"\" def __init__(self, message): super().__init__(message)", "< 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise", "\"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException',", "Prepare decorrelated jitter backoff delay. last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay", "Probably had a throttle or a ConditionCheckFailed. 
print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) <", "'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so", "5 \"\"\" These derived exceptions of botocore.exceptions.ClientError will be not be retried by", "{}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return event def", "# 'write_id_index_status': { # 'done': False, # 'delay': 0, # 'retries_left': int #", "jitter backoff delay. last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS", "dictionary so the step function knows to retry. Args: write_id_index_status (dict): Update this", "type_ (type): Class as a type. Returns: (str|None): Just the name of the", "ClientError that are not part of this list get wrapped in DynamoClientError to", "# } # } import botocore from bossutils.aws import get_region import json import", "last_delay * 3)) def get_class_name(type_): \"\"\" Get just the class name (w/o module(s)", "useful when wrapped, so the expected errors are enumerated below and in the", "controls retries via event['write_id_index_status']['retries_left'] the step function should proceed to its catch handling", "key to the id in the # DynamoDB id index table. # #", "this lambda. Since this lambda controls retries via event['write_id_index_status']['retries_left'] the step function should", "'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap", "# Probably had a throttle or a ConditionCheckFailed. 
print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left'])", "are not part of this list get wrapped in DynamoClientError to ensure the", "failures, uses decorrelatd jitter backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It", "retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException',", "'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3", "..., # 's3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ...,", "write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter", "{ # 'done': False, # 'delay': 0, # 'retries_left': int # How many", "\"\"\" Update the given dictionary so the step function knows to retry. Args:", "s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status = event['write_id_index_status'] id_index_new_chunk_threshold", "expects to get from events dictionary # { # 'id_index_table': ..., # 's3_index_table':", "raise DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return event def prep_for_retry(write_id_index_status): \"\"\"", "# 'cuboid_object_key': '...', # 'id_group': '...', # 'version': '...', # 'write_id_index_status': { #", "when event['write_id_index_status']['retries_left'] == 0. 
\"\"\" def __init__(self, message): super().__init__(message) def handler(event, context): id_index_table", "are enumerated below and in the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS =", "True except botocore.exceptions.ClientError as ex: # Probably had a throttle or a ConditionCheckFailed.", "when wrapped, so the expected errors are enumerated below and in the step", "Class as a type. Returns: (str|None): Just the name of the class or", "proceed to its catch handling when it receives one of these exceptions. Derived", "or a ConditionCheckFailed. print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in", "id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status", "int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter backoff delay. last_delay = int(write_id_index_status['delay']) if", "[ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException'", "from spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These", "event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status = event['write_id_index_status'] id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices(", "'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 
class DynamoClientError(Exception):
    """
    Wrap boto3 ClientError exceptions so the step function can fail when
    event['write_id_index_status']['retries_left'] == 0.

    Constructed with a single message string, e.g.
    ``DynamoClientError('{}: {}'.format(type(ex), ex))``.
    """
    # The previous explicit __init__(self, message) only forwarded to
    # super().__init__(message); Exception already does exactly that, so
    # the override was redundant and has been removed.  Construction,
    # str(), and .args behavior are unchanged.
# } # } import", "not part of this list get wrapped in DynamoClientError to ensure the step", "event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex: # Probably had a throttle", "s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'],", "had a throttle or a ConditionCheckFailed. print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1:", "# 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ..., # 'cuboid_object_key': '...', # 'id_group': '...', #", "= '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return", "a ConditionCheckFailed. print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS:", "class name (w/o module(s) from the type. Args: type_ (type): Class as a", "left in case of an error. # } # } import botocore from", "event['write_id_index_status'] id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try:", "backoff delay. last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay']", "id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id,", "goes to its catch handling step. The error information available to the step", "= str(ex) prep_for_retry(write_id_index_status) return event def prep_for_retry(write_id_index_status): \"\"\" Update the given dictionary so", "one of these exceptions. 
Derived exceptions of ClientError that are not part of", "(event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in", "id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex: # Probably", "\"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated", "function knows to retry. Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] =", "(dict): Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) -", "the # DynamoDB id index table. # # If there are failures, uses", "\"\"\" def __init__(self, message): super().__init__(message) def handler(event, context): id_index_table = event['id_index_table'] s3_index_table =", "DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException',", "be not be retried by the step function that calls this lambda. Since", "\"\"\" Get just the class name (w/o module(s) from the type. Args: type_", "its catch handling step. The error information available to the step function isn't", "def prep_for_retry(write_id_index_status): \"\"\" Update the given dictionary so the step function knows to", "( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter backoff delay. 
last_delay = int(write_id_index_status['delay'])", "error information available to the step function isn't as useful when wrapped, so", "# How many retries left in case of an error. # } #", "the given dictionary so the step function knows to retry. Args: write_id_index_status (dict):", "the morton index of a cuboid object key to the id in the", "lambda. Since this lambda controls retries via event['write_id_index_status']['retries_left'] the step function should proceed", "Wrap boto3 ClientError exceptions so the step function can fail when event['write_id_index_status']['retries_left'] ==", "'done': False, # 'delay': 0, # 'retries_left': int # How many retries left", "write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left'])", "to get from events dictionary # { # 'id_index_table': ..., # 's3_index_table': ...,", "of a cuboid object key to the id in the # DynamoDB id", "# 'version': '...', # 'write_id_index_status': { # 'done': False, # 'delay': 0, #", "its catch handling when it receives one of these exceptions. Derived exceptions of", "# # It expects to get from events dictionary # { # 'id_index_table':", "random from spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\"", "it receives one of these exceptions. Derived exceptions of ClientError that are not", "'id_index_new_chunk_threshold': ..., # 'cuboid_object_key': '...', # 'id_group': '...', # 'version': '...', # 'write_id_index_status':", "except botocore.exceptions.ClientError as ex: # Probably had a throttle or a ConditionCheckFailed. print('ClientError", "type. Returns: (str|None): Just the name of the class or None. 
\"\"\" try:", "Lambda to write the morton index of a cuboid object key to the", "will be not be retried by the step function that calls this lambda.", "time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived exceptions of botocore.exceptions.ClientError will", "of botocore.exceptions.ClientError will be not be retried by the step function that calls", "last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3))", "there are failures, uses decorrelatd jitter backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ #", "int # How many retries left in case of an error. # }", "# # If there are failures, uses decorrelatd jitter backoff algorithm described in:", "in DynamoClientError to ensure the step function goes to its catch handling step.", "= BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_): \"\"\" Get", "0, # 'retries_left': int # How many retries left in case of an", "via event['write_id_index_status']['retries_left'] the step function should proceed to its catch handling when it", "0. \"\"\" def __init__(self, message): super().__init__(message) def handler(event, context): id_index_table = event['id_index_table'] s3_index_table", "obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError", "if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay *", "be retried by the step function that calls this lambda. 
# Floor (in seconds) for the decorrelated-jitter retry delay.
BASE_DELAY_TIME_SECS = 5

# These derived exceptions of botocore.exceptions.ClientError will not be
# retried by the step function that calls this lambda.  Since this lambda
# controls retries via event['write_id_index_status']['retries_left'], the
# step function should proceed to its catch handling when it receives one
# of these exceptions.
#
# Derived exceptions of ClientError that are not part of this list get
# wrapped in DynamoClientError to ensure the step function goes to its
# catch handling step.  The error information available to the step
# function isn't as useful when wrapped, so the expected errors are
# enumerated below and in the step function's retry statement.
DO_NOT_WRAP_THESE_EXCEPTIONS = [
    'ClientError',
    'ConditionalCheckFailedException',
    'GlobalTableNotFoundException',
    'InternalServerError',
    'ItemCollectionSizeLimitExceededException',
    'LimitExceededException',
    'ProvisionedThroughputExceededException',
    'ReplicaAlreadyExistsException',
    'ReplicaNotFoundException',
    'ResourceInUseException',
    'ResourceNotFoundException',
    'TableNotFoundException',
]
# # If there are failures, uses decorrelatd jitter backoff algorithm described", "import get_region import json import random from spdb.spatialdb.object_indices import ObjectIndices from time import", "'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the step function", "'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class", "event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket = event['cuboid_bucket'] write_id_index_status = event['write_id_index_status']", "to the step function isn't as useful when wrapped, so the expected errors", "return event def prep_for_retry(write_id_index_status): \"\"\" Update the given dictionary so the step function", "'write_id_index_status': { # 'done': False, # 'delay': 0, # 'retries_left': int # How", "round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_): \"\"\" Get just the class name", "..., # 'cuboid_object_key': '...', # 'id_group': '...', # 'version': '...', # 'write_id_index_status': {", "class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the step function can fail", "Returns: (str|None): Just the name of the class or None. \"\"\" try: return", "step function knows to retry. Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done']", "3)) def get_class_name(type_): \"\"\" Get just the class name (w/o module(s) from the", "(w/o module(s) from the type. Args: type_ (type): Class as a type. 
Returns:", "in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result']", "'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException', 'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ]", "obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']:", "of this list get wrapped in DynamoClientError to ensure the step function goes", "# 's3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ..., #", "sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived exceptions of botocore.exceptions.ClientError will be not", "step. The error information available to the step function isn't as useful when", "get_class_name(type_): \"\"\" Get just the class name (w/o module(s) from the type. Args:", "of these exceptions. Derived exceptions of ClientError that are not part of this", "(type): Class as a type. Returns: (str|None): Just the name of the class", "step function that calls this lambda. Since this lambda controls retries via event['write_id_index_status']['retries_left']", "this list get wrapped in DynamoClientError to ensure the step function goes to", "as ex: # Probably had a throttle or a ConditionCheckFailed. print('ClientError caught: {}'.format(ex))", "import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived exceptions", "given dictionary so the step function knows to retry. Args: write_id_index_status (dict): Update", "in the # DynamoDB id index table. 
# # If there are failures,", "# } import botocore from bossutils.aws import get_region import json import random from", "enumerated below and in the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [", "if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex),", "are failures, uses decorrelatd jitter backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # #", "events dictionary # { # 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table': ...,", "def handler(event, context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket", "wrapped in DynamoClientError to ensure the step function goes to its catch handling", "raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result'] = str(ex)", "write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex: # Probably had a throttle or", "1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg)", "the step function isn't as useful when wrapped, so the expected errors are", "retries via event['write_id_index_status']['retries_left'] the step function should proceed to its catch handling when", "the step function knows to retry. Args: write_id_index_status (dict): Update this dict. 
\"\"\"", "< BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def", "'InternalServerError', 'ItemCollectionSizeLimitExceededException', 'LimitExceededException', 'ProvisionedThroughputExceededException', 'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\"", "decorrelatd jitter backoff algorithm described in: # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to", "last_delay = int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round(", "retries left in case of an error. # } # } import botocore", "print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg", "prep_for_retry(write_id_index_status): \"\"\" Update the given dictionary so the step function knows to retry.", "not be retried by the step function that calls this lambda. Since this", "id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for", "a throttle or a ConditionCheckFailed. print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if", "dict. 
\"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare", "spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived", "import json import random from spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS", "botocore.exceptions.ClientError will be not be retried by the step function that calls this", "ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold,", "error. # } # } import botocore from bossutils.aws import get_region import json", "part of this list get wrapped in DynamoClientError to ensure the step function", "caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg =", "id index table. # # If there are failures, uses decorrelatd jitter backoff", "'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the", "ex: # Probably had a throttle or a ConditionCheckFailed. print('ClientError caught: {}'.format(ex)) if", "from events dictionary # { # 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table':", "(str|None): Just the name of the class or None. 
\"\"\" try: return str(type_).rsplit('.',", "a cuboid object key to the id in the # DynamoDB id index", "handler(event, context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket =", "context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table = event['id_count_table'] cuboid_bucket = event['cuboid_bucket']", "index of a cuboid object key to the id in the # DynamoDB", "the name of the class or None. \"\"\" try: return str(type_).rsplit('.', 1)[1].rstrip(\"'>\") except", "# 'done': False, # 'delay': 0, # 'retries_left': int # How many retries", "botocore.exceptions.ClientError as ex: # Probably had a throttle or a ConditionCheckFailed. print('ClientError caught:", "the step function can fail when event['write_id_index_status']['retries_left'] == 0. \"\"\" def __init__(self, message):", "] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions so the step function can", "int(write_id_index_status['delay']) if last_delay < BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay", "event['write_id_index_status']['retries_left'] == 0. \"\"\" def __init__(self, message): super().__init__(message) def handler(event, context): id_index_table =", "== 0. \"\"\" def __init__(self, message): super().__init__(message) def handler(event, context): id_index_table = event['id_index_table']", "'ReplicaAlreadyExistsException', 'ReplicaNotFoundException', 'ResourceInUseException', 'ResourceNotFoundException', 'TableNotFoundException' ] class DynamoClientError(Exception): \"\"\" Wrap boto3 ClientError exceptions", "1) # Prepare decorrelated jitter backoff delay. 
last_delay = int(write_id_index_status['delay']) if last_delay <", "BASE_DELAY_TIME_SECS: last_delay = BASE_DELAY_TIME_SECS write_id_index_status['delay'] = round( random.uniform(BASE_DELAY_TIME_SECS, last_delay * 3)) def get_class_name(type_):", "__init__(self, message): super().__init__(message) def handler(event, context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table", "the id in the # DynamoDB id index table. # # If there", "from time import sleep BASE_DELAY_TIME_SECS = 5 \"\"\" These derived exceptions of botocore.exceptions.ClientError", "The error information available to the step function isn't as useful when wrapped,", "obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex: #", "boto3 ClientError exceptions so the step function can fail when event['write_id_index_status']['retries_left'] == 0.", "https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get from events dictionary # { #", "object key to the id in the # DynamoDB id index table. #", "should proceed to its catch handling when it receives one of these exceptions.", "write_id_index_status = event['write_id_index_status'] id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table, id_index_table, id_count_table, cuboid_bucket,", "\"\"\" These derived exceptions of botocore.exceptions.ClientError will be not be retried by the", "msg = '{}: {}'.format(type(ex), ex) raise DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status)", "function isn't as useful when wrapped, so the expected errors are enumerated below", "an error. 
# } # } import botocore from bossutils.aws import get_region import", "wrapped, so the expected errors are enumerated below and in the step function's", "} import botocore from bossutils.aws import get_region import json import random from spdb.spatialdb.object_indices", "'...', # 'write_id_index_status': { # 'done': False, # 'delay': 0, # 'retries_left': int", "message): super().__init__(message) def handler(event, context): id_index_table = event['id_index_table'] s3_index_table = event['s3_index_table'] id_count_table =", "retry. Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] =", "in the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException', 'GlobalTableNotFoundException',", "and in the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS = [ 'ClientError', 'ConditionalCheckFailedException',", "# DynamoDB id index table. # # If there are failures, uses decorrelatd", "# https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ # # It expects to get from events dictionary # {", "int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise msg = '{}: {}'.format(type(ex), ex)", "cuboid_bucket = event['cuboid_bucket'] write_id_index_status = event['write_id_index_status'] id_index_new_chunk_threshold = (event['id_index_new_chunk_threshold']) obj_ind = ObjectIndices( s3_index_table,", "these exceptions. Derived exceptions of ClientError that are not part of this list", "event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] = True except botocore.exceptions.ClientError as ex:", "ConditionCheckFailed. 
print('ClientError caught: {}'.format(ex)) if int(write_id_index_status['retries_left']) < 1: if get_class_name(ex.__class__) in DO_NOT_WRAP_THESE_EXCEPTIONS: raise", "to ensure the step function goes to its catch handling step. The error", "ex) raise DynamoClientError(msg) from ex event['result'] = str(ex) prep_for_retry(write_id_index_status) return event def prep_for_retry(write_id_index_status):", "so the step function knows to retry. Args: write_id_index_status (dict): Update this dict.", "False write_id_index_status['retries_left'] = ( int(write_id_index_status['retries_left']) - 1) # Prepare decorrelated jitter backoff delay.", "errors are enumerated below and in the step function's retry statement. \"\"\" DO_NOT_WRAP_THESE_EXCEPTIONS", "..., # 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ..., # 'cuboid_object_key': '...', # 'id_group': '...',", "to its catch handling step. The error information available to the step function", "import random from spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS = 5", "by the step function that calls this lambda. Since this lambda controls retries", "available to the step function isn't as useful when wrapped, so the expected", "the class or None. \"\"\" try: return str(type_).rsplit('.', 1)[1].rstrip(\"'>\") except IndexError: return None", "..., # 'id_count_table': ..., # 'cuboid_bucket': ..., # 'id_index_new_chunk_threshold': ..., # 'cuboid_object_key': '...',", "get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version']) write_id_index_status['done'] =", "ClientError exceptions so the step function can fail when event['write_id_index_status']['retries_left'] == 0. 
\"\"\"", "id_count_table, cuboid_bucket, get_region()) try: for obj_id in event['id_group']: obj_ind.write_id_index( id_index_new_chunk_threshold, event['cuboid_object_key'], obj_id, event['version'])", "* 3)) def get_class_name(type_): \"\"\" Get just the class name (w/o module(s) from", "'delay': 0, # 'retries_left': int # How many retries left in case of", "json import random from spdb.spatialdb.object_indices import ObjectIndices from time import sleep BASE_DELAY_TIME_SECS =", "'cuboid_object_key': '...', # 'id_group': '...', # 'version': '...', # 'write_id_index_status': { # 'done':", "# { # 'id_index_table': ..., # 's3_index_table': ..., # 'id_count_table': ..., # 'cuboid_bucket':", "DynamoDB id index table. # # If there are failures, uses decorrelatd jitter", "Args: write_id_index_status (dict): Update this dict. \"\"\" write_id_index_status['done'] = False write_id_index_status['retries_left'] = (", "event def prep_for_retry(write_id_index_status): \"\"\" Update the given dictionary so the step function knows", "function that calls this lambda. Since this lambda controls retries via event['write_id_index_status']['retries_left'] the" ]